4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
62 bool singlestep_enabled
;
65 /* Information carried about a condition to be evaluated. */
72 struct { TCGv_i64 a
, b
; } s64
;
73 struct { TCGv_i32 a
, b
; } s32
;
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
97 S390CPU
*cpu
= S390_CPU(cs
);
98 CPUS390XState
*env
= &cpu
->env
;
101 if (env
->cc_op
> 3) {
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
103 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
105 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
106 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
109 for (i
= 0; i
< 16; i
++) {
110 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
112 cpu_fprintf(f
, "\n");
118 for (i
= 0; i
< 16; i
++) {
119 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
121 cpu_fprintf(f
, "\n");
127 for (i
= 0; i
< 32; i
++) {
128 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
129 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
130 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i
= 0; i
< 16; i
++) {
135 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
137 cpu_fprintf(f
, "\n");
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i
= 0; i
< CC_OP_MAX
; i
++) {
146 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
147 inline_branch_miss
[i
], inline_branch_hit
[i
]);
151 cpu_fprintf(f
, "\n");
154 static TCGv_i64 psw_addr
;
155 static TCGv_i64 psw_mask
;
156 static TCGv_i64 gbea
;
158 static TCGv_i32 cc_op
;
159 static TCGv_i64 cc_src
;
160 static TCGv_i64 cc_dst
;
161 static TCGv_i64 cc_vr
;
163 static char cpu_reg_names
[32][4];
164 static TCGv_i64 regs
[16];
165 static TCGv_i64 fregs
[16];
167 void s390x_translate_init(void)
171 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
172 tcg_ctx
.tcg_env
= cpu_env
;
173 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
174 offsetof(CPUS390XState
, psw
.addr
),
176 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
177 offsetof(CPUS390XState
, psw
.mask
),
179 gbea
= tcg_global_mem_new_i64(cpu_env
,
180 offsetof(CPUS390XState
, gbea
),
183 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
185 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
187 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
189 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
192 for (i
= 0; i
< 16; i
++) {
193 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
194 regs
[i
] = tcg_global_mem_new(cpu_env
,
195 offsetof(CPUS390XState
, regs
[i
]),
199 for (i
= 0; i
< 16; i
++) {
200 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
201 fregs
[i
] = tcg_global_mem_new(cpu_env
,
202 offsetof(CPUS390XState
, vregs
[i
][0].d
),
203 cpu_reg_names
[i
+ 16]);
207 static TCGv_i64
load_reg(int reg
)
209 TCGv_i64 r
= tcg_temp_new_i64();
210 tcg_gen_mov_i64(r
, regs
[reg
]);
214 static TCGv_i64
load_freg32_i64(int reg
)
216 TCGv_i64 r
= tcg_temp_new_i64();
217 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
221 static void store_reg(int reg
, TCGv_i64 v
)
223 tcg_gen_mov_i64(regs
[reg
], v
);
226 static void store_freg(int reg
, TCGv_i64 v
)
228 tcg_gen_mov_i64(fregs
[reg
], v
);
231 static void store_reg32_i64(int reg
, TCGv_i64 v
)
233 /* 32 bit register writes keep the upper half */
234 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
237 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
239 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
242 static void store_freg32_i64(int reg
, TCGv_i64 v
)
244 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
247 static void return_low128(TCGv_i64 dest
)
249 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
252 static void update_psw_addr(DisasContext
*s
)
255 tcg_gen_movi_i64(psw_addr
, s
->pc
);
258 static void per_branch(DisasContext
*s
, bool to_next
)
260 #ifndef CONFIG_USER_ONLY
261 tcg_gen_movi_i64(gbea
, s
->pc
);
263 if (s
->tb
->flags
& FLAG_MASK_PER
) {
264 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
265 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
267 tcg_temp_free_i64(next_pc
);
273 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
274 TCGv_i64 arg1
, TCGv_i64 arg2
)
276 #ifndef CONFIG_USER_ONLY
277 if (s
->tb
->flags
& FLAG_MASK_PER
) {
278 TCGLabel
*lab
= gen_new_label();
279 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
281 tcg_gen_movi_i64(gbea
, s
->pc
);
282 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
286 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
287 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
288 tcg_temp_free_i64(pc
);
293 static void per_breaking_event(DisasContext
*s
)
295 tcg_gen_movi_i64(gbea
, s
->pc
);
298 static void update_cc_op(DisasContext
*s
)
300 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
301 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
305 static void potential_page_fault(DisasContext
*s
)
311 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
313 return (uint64_t)cpu_lduw_code(env
, pc
);
316 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
321 static int get_mem_index(DisasContext
*s
)
323 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
324 case PSW_ASC_PRIMARY
>> 32:
326 case PSW_ASC_SECONDARY
>> 32:
328 case PSW_ASC_HOME
>> 32:
336 static void gen_exception(int excp
)
338 TCGv_i32 tmp
= tcg_const_i32(excp
);
339 gen_helper_exception(cpu_env
, tmp
);
340 tcg_temp_free_i32(tmp
);
343 static void gen_program_exception(DisasContext
*s
, int code
)
347 /* Remember what pgm exeption this was. */
348 tmp
= tcg_const_i32(code
);
349 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
350 tcg_temp_free_i32(tmp
);
352 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
353 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
354 tcg_temp_free_i32(tmp
);
356 /* Advance past instruction. */
363 /* Trigger exception. */
364 gen_exception(EXCP_PGM
);
367 static inline void gen_illegal_opcode(DisasContext
*s
)
369 gen_program_exception(s
, PGM_OPERATION
);
372 static inline void gen_trap(DisasContext
*s
)
376 /* Set DXC to 0xff. */
377 t
= tcg_temp_new_i32();
378 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
379 tcg_gen_ori_i32(t
, t
, 0xff00);
380 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
381 tcg_temp_free_i32(t
);
383 gen_program_exception(s
, PGM_DATA
);
386 #ifndef CONFIG_USER_ONLY
387 static void check_privileged(DisasContext
*s
)
389 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
390 gen_program_exception(s
, PGM_PRIVILEGED
);
395 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
397 TCGv_i64 tmp
= tcg_temp_new_i64();
398 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
400 /* Note that d2 is limited to 20 bits, signed. If we crop negative
401 displacements early we create larger immedate addends. */
403 /* Note that addi optimizes the imm==0 case. */
405 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
406 tcg_gen_addi_i64(tmp
, tmp
, d2
);
408 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
410 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
416 tcg_gen_movi_i64(tmp
, d2
);
419 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
425 static inline bool live_cc_data(DisasContext
*s
)
427 return (s
->cc_op
!= CC_OP_DYNAMIC
428 && s
->cc_op
!= CC_OP_STATIC
432 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
434 if (live_cc_data(s
)) {
435 tcg_gen_discard_i64(cc_src
);
436 tcg_gen_discard_i64(cc_dst
);
437 tcg_gen_discard_i64(cc_vr
);
439 s
->cc_op
= CC_OP_CONST0
+ val
;
442 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
444 if (live_cc_data(s
)) {
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_discard_i64(cc_vr
);
448 tcg_gen_mov_i64(cc_dst
, dst
);
452 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
455 if (live_cc_data(s
)) {
456 tcg_gen_discard_i64(cc_vr
);
458 tcg_gen_mov_i64(cc_src
, src
);
459 tcg_gen_mov_i64(cc_dst
, dst
);
463 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
464 TCGv_i64 dst
, TCGv_i64 vr
)
466 tcg_gen_mov_i64(cc_src
, src
);
467 tcg_gen_mov_i64(cc_dst
, dst
);
468 tcg_gen_mov_i64(cc_vr
, vr
);
472 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
474 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
477 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
479 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
482 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
484 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
487 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
489 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
492 /* CC value is in env->cc_op */
493 static void set_cc_static(DisasContext
*s
)
495 if (live_cc_data(s
)) {
496 tcg_gen_discard_i64(cc_src
);
497 tcg_gen_discard_i64(cc_dst
);
498 tcg_gen_discard_i64(cc_vr
);
500 s
->cc_op
= CC_OP_STATIC
;
503 /* calculates cc into cc_op */
504 static void gen_op_calc_cc(DisasContext
*s
)
506 TCGv_i32 local_cc_op
;
509 TCGV_UNUSED_I32(local_cc_op
);
510 TCGV_UNUSED_I64(dummy
);
513 dummy
= tcg_const_i64(0);
527 local_cc_op
= tcg_const_i32(s
->cc_op
);
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
547 /* env->cc_op already is the cc value */
562 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
567 case CC_OP_LTUGTU_32
:
568 case CC_OP_LTUGTU_64
:
575 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
590 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
593 /* unknown operation - assume 3 arguments and cc_op in env */
594 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
600 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
601 tcg_temp_free_i32(local_cc_op
);
603 if (!TCGV_IS_UNUSED_I64(dummy
)) {
604 tcg_temp_free_i64(dummy
);
607 /* We now have cc in cc_op as constant */
611 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
613 if (unlikely(s
->singlestep_enabled
) ||
614 (s
->tb
->cflags
& CF_LAST_IO
) ||
615 (s
->tb
->flags
& FLAG_MASK_PER
)) {
618 #ifndef CONFIG_USER_ONLY
619 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
620 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
626 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
628 #ifdef DEBUG_INLINE_BRANCHES
629 inline_branch_miss
[cc_op
]++;
633 static void account_inline_branch(DisasContext
*s
, int cc_op
)
635 #ifdef DEBUG_INLINE_BRANCHES
636 inline_branch_hit
[cc_op
]++;
640 /* Table of mask values to comparison codes, given a comparison as input.
641 For such, CC=3 should not be possible. */
642 static const TCGCond ltgt_cond
[16] = {
643 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
644 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
645 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
646 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
647 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
648 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
649 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
650 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
653 /* Table of mask values to comparison codes, given a logic op as input.
654 For such, only CC=0 and CC=1 should be possible. */
655 static const TCGCond nz_cond
[16] = {
656 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
657 TCG_COND_NEVER
, TCG_COND_NEVER
,
658 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
659 TCG_COND_NE
, TCG_COND_NE
,
660 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
661 TCG_COND_EQ
, TCG_COND_EQ
,
662 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
663 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
666 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
667 details required to generate a TCG comparison. */
668 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
671 enum cc_op old_cc_op
= s
->cc_op
;
673 if (mask
== 15 || mask
== 0) {
674 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
677 c
->g1
= c
->g2
= true;
682 /* Find the TCG condition for the mask + cc op. */
688 cond
= ltgt_cond
[mask
];
689 if (cond
== TCG_COND_NEVER
) {
692 account_inline_branch(s
, old_cc_op
);
695 case CC_OP_LTUGTU_32
:
696 case CC_OP_LTUGTU_64
:
697 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
698 if (cond
== TCG_COND_NEVER
) {
701 account_inline_branch(s
, old_cc_op
);
705 cond
= nz_cond
[mask
];
706 if (cond
== TCG_COND_NEVER
) {
709 account_inline_branch(s
, old_cc_op
);
724 account_inline_branch(s
, old_cc_op
);
739 account_inline_branch(s
, old_cc_op
);
743 switch (mask
& 0xa) {
744 case 8: /* src == 0 -> no one bit found */
747 case 2: /* src != 0 -> one bit found */
753 account_inline_branch(s
, old_cc_op
);
759 case 8 | 2: /* vr == 0 */
762 case 4 | 1: /* vr != 0 */
765 case 8 | 4: /* no carry -> vr >= src */
768 case 2 | 1: /* carry -> vr < src */
774 account_inline_branch(s
, old_cc_op
);
779 /* Note that CC=0 is impossible; treat it as dont-care. */
781 case 2: /* zero -> op1 == op2 */
784 case 4 | 1: /* !zero -> op1 != op2 */
787 case 4: /* borrow (!carry) -> op1 < op2 */
790 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
796 account_inline_branch(s
, old_cc_op
);
801 /* Calculate cc value. */
806 /* Jump based on CC. We'll load up the real cond below;
807 the assignment here merely avoids a compiler warning. */
808 account_noninline_branch(s
, old_cc_op
);
809 old_cc_op
= CC_OP_STATIC
;
810 cond
= TCG_COND_NEVER
;
814 /* Load up the arguments of the comparison. */
816 c
->g1
= c
->g2
= false;
820 c
->u
.s32
.a
= tcg_temp_new_i32();
821 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
822 c
->u
.s32
.b
= tcg_const_i32(0);
825 case CC_OP_LTUGTU_32
:
828 c
->u
.s32
.a
= tcg_temp_new_i32();
829 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
830 c
->u
.s32
.b
= tcg_temp_new_i32();
831 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
838 c
->u
.s64
.b
= tcg_const_i64(0);
842 case CC_OP_LTUGTU_64
:
846 c
->g1
= c
->g2
= true;
852 c
->u
.s64
.a
= tcg_temp_new_i64();
853 c
->u
.s64
.b
= tcg_const_i64(0);
854 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
859 c
->u
.s32
.a
= tcg_temp_new_i32();
860 c
->u
.s32
.b
= tcg_temp_new_i32();
861 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
862 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
863 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
865 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
872 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
873 c
->u
.s64
.b
= tcg_const_i64(0);
885 case 0x8 | 0x4 | 0x2: /* cc != 3 */
887 c
->u
.s32
.b
= tcg_const_i32(3);
889 case 0x8 | 0x4 | 0x1: /* cc != 2 */
891 c
->u
.s32
.b
= tcg_const_i32(2);
893 case 0x8 | 0x2 | 0x1: /* cc != 1 */
895 c
->u
.s32
.b
= tcg_const_i32(1);
897 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
900 c
->u
.s32
.a
= tcg_temp_new_i32();
901 c
->u
.s32
.b
= tcg_const_i32(0);
902 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
904 case 0x8 | 0x4: /* cc < 2 */
906 c
->u
.s32
.b
= tcg_const_i32(2);
908 case 0x8: /* cc == 0 */
910 c
->u
.s32
.b
= tcg_const_i32(0);
912 case 0x4 | 0x2 | 0x1: /* cc != 0 */
914 c
->u
.s32
.b
= tcg_const_i32(0);
916 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
919 c
->u
.s32
.a
= tcg_temp_new_i32();
920 c
->u
.s32
.b
= tcg_const_i32(0);
921 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
923 case 0x4: /* cc == 1 */
925 c
->u
.s32
.b
= tcg_const_i32(1);
927 case 0x2 | 0x1: /* cc > 1 */
929 c
->u
.s32
.b
= tcg_const_i32(1);
931 case 0x2: /* cc == 2 */
933 c
->u
.s32
.b
= tcg_const_i32(2);
935 case 0x1: /* cc == 3 */
937 c
->u
.s32
.b
= tcg_const_i32(3);
940 /* CC is masked by something else: (8 >> cc) & mask. */
943 c
->u
.s32
.a
= tcg_const_i32(8);
944 c
->u
.s32
.b
= tcg_const_i32(0);
945 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
946 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
957 static void free_compare(DisasCompare
*c
)
961 tcg_temp_free_i64(c
->u
.s64
.a
);
963 tcg_temp_free_i32(c
->u
.s32
.a
);
968 tcg_temp_free_i64(c
->u
.s64
.b
);
970 tcg_temp_free_i32(c
->u
.s32
.b
);
975 /* ====================================================================== */
976 /* Define the insn format enumeration. */
977 #define F0(N) FMT_##N,
978 #define F1(N, X1) F0(N)
979 #define F2(N, X1, X2) F0(N)
980 #define F3(N, X1, X2, X3) F0(N)
981 #define F4(N, X1, X2, X3, X4) F0(N)
982 #define F5(N, X1, X2, X3, X4, X5) F0(N)
985 #include "insn-format.def"
995 /* Define a structure to hold the decoded fields. We'll store each inside
996 an array indexed by an enum. In order to conserve memory, we'll arrange
997 for fields that do not exist at the same time to overlap, thus the "C"
998 for compact. For checking purposes there is an "O" for original index
999 as well that will be applied to availability bitmaps. */
1001 enum DisasFieldIndexO
{
1024 enum DisasFieldIndexC
{
1055 struct DisasFields
{
1059 unsigned presentC
:16;
1060 unsigned int presentO
;
1064 /* This is the way fields are to be accessed out of DisasFields. */
1065 #define have_field(S, F) have_field1((S), FLD_O_##F)
1066 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1068 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1070 return (f
->presentO
>> c
) & 1;
1073 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1074 enum DisasFieldIndexC c
)
1076 assert(have_field1(f
, o
));
1080 /* Describe the layout of each field in each format. */
1081 typedef struct DisasField
{
1083 unsigned int size
:8;
1084 unsigned int type
:2;
1085 unsigned int indexC
:6;
1086 enum DisasFieldIndexO indexO
:8;
1089 typedef struct DisasFormatInfo
{
1090 DisasField op
[NUM_C_FIELD
];
1093 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1094 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1095 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1096 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1097 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1098 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1099 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1100 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1102 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1105 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1106 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1108 #define F0(N) { { } },
1109 #define F1(N, X1) { { X1 } },
1110 #define F2(N, X1, X2) { { X1, X2 } },
1111 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1112 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1113 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1115 static const DisasFormatInfo format_info
[] = {
1116 #include "insn-format.def"
1134 /* Generally, we'll extract operands into this structures, operate upon
1135 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1136 of routines below for more details. */
1138 bool g_out
, g_out2
, g_in1
, g_in2
;
1139 TCGv_i64 out
, out2
, in1
, in2
;
1143 /* Instructions can place constraints on their operands, raising specification
1144 exceptions if they are violated. To make this easy to automate, each "in1",
1145 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1146 of the following, or 0. To make this easy to document, we'll put the
1147 SPEC_<name> defines next to <name>. */
1149 #define SPEC_r1_even 1
1150 #define SPEC_r2_even 2
1151 #define SPEC_r3_even 4
1152 #define SPEC_r1_f128 8
1153 #define SPEC_r2_f128 16
1155 /* Return values from translate_one, indicating the state of the TB. */
1157 /* Continue the TB. */
1159 /* We have emitted one or more goto_tb. No fixup required. */
1161 /* We are not using a goto_tb (for whatever reason), but have updated
1162 the PC (for whatever reason), so there's no need to do it again on
1165 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1166 updated the PC for the next instruction to be executed. */
1168 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1169 No following code will be executed. */
1173 typedef enum DisasFacility
{
1174 FAC_Z
, /* zarch (default) */
1175 FAC_CASS
, /* compare and swap and store */
1176 FAC_CASS2
, /* compare and swap and store 2*/
1177 FAC_DFP
, /* decimal floating point */
1178 FAC_DFPR
, /* decimal floating point rounding */
1179 FAC_DO
, /* distinct operands */
1180 FAC_EE
, /* execute extensions */
1181 FAC_EI
, /* extended immediate */
1182 FAC_FPE
, /* floating point extension */
1183 FAC_FPSSH
, /* floating point support sign handling */
1184 FAC_FPRGR
, /* FPR-GR transfer */
1185 FAC_GIE
, /* general instructions extension */
1186 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1187 FAC_HW
, /* high-word */
1188 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1189 FAC_MIE
, /* miscellaneous-instruction-extensions */
1190 FAC_LAT
, /* load-and-trap */
1191 FAC_LOC
, /* load/store on condition */
1192 FAC_LD
, /* long displacement */
1193 FAC_PC
, /* population count */
1194 FAC_SCF
, /* store clock fast */
1195 FAC_SFLE
, /* store facility list extended */
1196 FAC_ILA
, /* interlocked access facility 1 */
1197 FAC_LPP
, /* load-program-parameter */
1203 DisasFacility fac
:8;
1208 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1209 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1210 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1211 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1212 void (*help_cout
)(DisasContext
*, DisasOps
*);
1213 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1218 /* ====================================================================== */
1219 /* Miscellaneous helpers, used by several operations. */
1221 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1222 DisasOps
*o
, int mask
)
1224 int b2
= get_field(f
, b2
);
1225 int d2
= get_field(f
, d2
);
1228 o
->in2
= tcg_const_i64(d2
& mask
);
1230 o
->in2
= get_address(s
, 0, b2
, d2
);
1231 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1235 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1237 if (dest
== s
->next_pc
) {
1238 per_branch(s
, true);
1241 if (use_goto_tb(s
, dest
)) {
1243 per_breaking_event(s
);
1245 tcg_gen_movi_i64(psw_addr
, dest
);
1246 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1247 return EXIT_GOTO_TB
;
1249 tcg_gen_movi_i64(psw_addr
, dest
);
1250 per_branch(s
, false);
1251 return EXIT_PC_UPDATED
;
1255 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1256 bool is_imm
, int imm
, TCGv_i64 cdest
)
1259 uint64_t dest
= s
->pc
+ 2 * imm
;
1262 /* Take care of the special cases first. */
1263 if (c
->cond
== TCG_COND_NEVER
) {
1268 if (dest
== s
->next_pc
) {
1269 /* Branch to next. */
1270 per_branch(s
, true);
1274 if (c
->cond
== TCG_COND_ALWAYS
) {
1275 ret
= help_goto_direct(s
, dest
);
1279 if (TCGV_IS_UNUSED_I64(cdest
)) {
1280 /* E.g. bcr %r0 -> no branch. */
1284 if (c
->cond
== TCG_COND_ALWAYS
) {
1285 tcg_gen_mov_i64(psw_addr
, cdest
);
1286 per_branch(s
, false);
1287 ret
= EXIT_PC_UPDATED
;
1292 if (use_goto_tb(s
, s
->next_pc
)) {
1293 if (is_imm
&& use_goto_tb(s
, dest
)) {
1294 /* Both exits can use goto_tb. */
1297 lab
= gen_new_label();
1299 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1301 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1304 /* Branch not taken. */
1306 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1307 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1311 per_breaking_event(s
);
1313 tcg_gen_movi_i64(psw_addr
, dest
);
1314 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1318 /* Fallthru can use goto_tb, but taken branch cannot. */
1319 /* Store taken branch destination before the brcond. This
1320 avoids having to allocate a new local temp to hold it.
1321 We'll overwrite this in the not taken case anyway. */
1323 tcg_gen_mov_i64(psw_addr
, cdest
);
1326 lab
= gen_new_label();
1328 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1330 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1333 /* Branch not taken. */
1336 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1337 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1341 tcg_gen_movi_i64(psw_addr
, dest
);
1343 per_breaking_event(s
);
1344 ret
= EXIT_PC_UPDATED
;
1347 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1348 Most commonly we're single-stepping or some other condition that
1349 disables all use of goto_tb. Just update the PC and exit. */
1351 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1353 cdest
= tcg_const_i64(dest
);
1357 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1359 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1361 TCGv_i32 t0
= tcg_temp_new_i32();
1362 TCGv_i64 t1
= tcg_temp_new_i64();
1363 TCGv_i64 z
= tcg_const_i64(0);
1364 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1365 tcg_gen_extu_i32_i64(t1
, t0
);
1366 tcg_temp_free_i32(t0
);
1367 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1368 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1369 tcg_temp_free_i64(t1
);
1370 tcg_temp_free_i64(z
);
1374 tcg_temp_free_i64(cdest
);
1376 tcg_temp_free_i64(next
);
1378 ret
= EXIT_PC_UPDATED
;
1386 /* ====================================================================== */
1387 /* The operations. These perform the bulk of the work for any insn,
1388 usually after the operands have been loaded and output initialized. */
1390 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1393 z
= tcg_const_i64(0);
1394 n
= tcg_temp_new_i64();
1395 tcg_gen_neg_i64(n
, o
->in2
);
1396 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1397 tcg_temp_free_i64(n
);
1398 tcg_temp_free_i64(z
);
1402 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1404 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1408 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1410 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1414 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1416 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1417 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1421 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1423 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1427 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1432 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1434 /* The carry flag is the msb of CC, therefore the branch mask that would
1435 create that comparison is 3. Feeding the generated comparison to
1436 setcond produces the carry flag that we desire. */
1437 disas_jcc(s
, &cmp
, 3);
1438 carry
= tcg_temp_new_i64();
1440 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1442 TCGv_i32 t
= tcg_temp_new_i32();
1443 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1444 tcg_gen_extu_i32_i64(carry
, t
);
1445 tcg_temp_free_i32(t
);
1449 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1450 tcg_temp_free_i64(carry
);
1454 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1456 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1460 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1462 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1466 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1468 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1469 return_low128(o
->out2
);
1473 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1475 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1479 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1481 int shift
= s
->insn
->data
& 0xff;
1482 int size
= s
->insn
->data
>> 8;
1483 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1486 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1487 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1488 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1490 /* Produce the CC from only the bits manipulated. */
1491 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1492 set_cc_nz_u64(s
, cc_dst
);
1496 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1498 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1499 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1500 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1501 per_branch(s
, false);
1502 return EXIT_PC_UPDATED
;
1508 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1510 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1511 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1514 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1516 int m1
= get_field(s
->fields
, m1
);
1517 bool is_imm
= have_field(s
->fields
, i2
);
1518 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1521 /* BCR with R2 = 0 causes no branching */
1522 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1524 /* Perform serialization */
1525 /* FIXME: check for fast-BCR-serialization facility */
1526 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1529 /* Perform serialization */
1530 /* FIXME: perform checkpoint-synchronisation */
1531 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1536 disas_jcc(s
, &c
, m1
);
1537 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1540 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1542 int r1
= get_field(s
->fields
, r1
);
1543 bool is_imm
= have_field(s
->fields
, i2
);
1544 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1548 c
.cond
= TCG_COND_NE
;
1553 t
= tcg_temp_new_i64();
1554 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1555 store_reg32_i64(r1
, t
);
1556 c
.u
.s32
.a
= tcg_temp_new_i32();
1557 c
.u
.s32
.b
= tcg_const_i32(0);
1558 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1559 tcg_temp_free_i64(t
);
1561 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1564 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1566 int r1
= get_field(s
->fields
, r1
);
1567 int imm
= get_field(s
->fields
, i2
);
1571 c
.cond
= TCG_COND_NE
;
1576 t
= tcg_temp_new_i64();
1577 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1578 tcg_gen_subi_i64(t
, t
, 1);
1579 store_reg32h_i64(r1
, t
);
1580 c
.u
.s32
.a
= tcg_temp_new_i32();
1581 c
.u
.s32
.b
= tcg_const_i32(0);
1582 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1583 tcg_temp_free_i64(t
);
1585 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1588 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1590 int r1
= get_field(s
->fields
, r1
);
1591 bool is_imm
= have_field(s
->fields
, i2
);
1592 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1595 c
.cond
= TCG_COND_NE
;
1600 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1601 c
.u
.s64
.a
= regs
[r1
];
1602 c
.u
.s64
.b
= tcg_const_i64(0);
1604 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1607 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1609 int r1
= get_field(s
->fields
, r1
);
1610 int r3
= get_field(s
->fields
, r3
);
1611 bool is_imm
= have_field(s
->fields
, i2
);
1612 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1616 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1621 t
= tcg_temp_new_i64();
1622 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1623 c
.u
.s32
.a
= tcg_temp_new_i32();
1624 c
.u
.s32
.b
= tcg_temp_new_i32();
1625 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1626 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1627 store_reg32_i64(r1
, t
);
1628 tcg_temp_free_i64(t
);
1630 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1633 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1635 int r1
= get_field(s
->fields
, r1
);
1636 int r3
= get_field(s
->fields
, r3
);
1637 bool is_imm
= have_field(s
->fields
, i2
);
1638 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1641 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1644 if (r1
== (r3
| 1)) {
1645 c
.u
.s64
.b
= load_reg(r3
| 1);
1648 c
.u
.s64
.b
= regs
[r3
| 1];
1652 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1653 c
.u
.s64
.a
= regs
[r1
];
1656 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1659 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1661 int imm
, m3
= get_field(s
->fields
, m3
);
1665 c
.cond
= ltgt_cond
[m3
];
1666 if (s
->insn
->data
) {
1667 c
.cond
= tcg_unsigned_cond(c
.cond
);
1669 c
.is_64
= c
.g1
= c
.g2
= true;
1673 is_imm
= have_field(s
->fields
, i4
);
1675 imm
= get_field(s
->fields
, i4
);
1678 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1679 get_field(s
->fields
, d4
));
1682 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1685 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1687 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1692 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1694 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1699 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1701 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1706 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1708 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1709 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1710 tcg_temp_free_i32(m3
);
1711 gen_set_cc_nz_f32(s
, o
->in2
);
1715 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1717 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1718 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1719 tcg_temp_free_i32(m3
);
1720 gen_set_cc_nz_f64(s
, o
->in2
);
1724 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1726 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1727 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1728 tcg_temp_free_i32(m3
);
1729 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1733 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1735 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1736 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1737 tcg_temp_free_i32(m3
);
1738 gen_set_cc_nz_f32(s
, o
->in2
);
1742 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1744 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1745 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1746 tcg_temp_free_i32(m3
);
1747 gen_set_cc_nz_f64(s
, o
->in2
);
1751 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1753 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1754 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1755 tcg_temp_free_i32(m3
);
1756 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1760 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1762 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1763 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1764 tcg_temp_free_i32(m3
);
1765 gen_set_cc_nz_f32(s
, o
->in2
);
1769 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1771 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1772 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1773 tcg_temp_free_i32(m3
);
1774 gen_set_cc_nz_f64(s
, o
->in2
);
1778 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1780 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1781 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1782 tcg_temp_free_i32(m3
);
1783 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1787 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1789 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1790 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1791 tcg_temp_free_i32(m3
);
1792 gen_set_cc_nz_f32(s
, o
->in2
);
1796 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1798 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1799 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1800 tcg_temp_free_i32(m3
);
1801 gen_set_cc_nz_f64(s
, o
->in2
);
1805 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1807 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1808 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1809 tcg_temp_free_i32(m3
);
1810 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1814 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1816 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1817 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1818 tcg_temp_free_i32(m3
);
1822 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1824 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1825 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1826 tcg_temp_free_i32(m3
);
1830 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1832 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1833 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1834 tcg_temp_free_i32(m3
);
1835 return_low128(o
->out2
);
1839 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1841 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1842 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1843 tcg_temp_free_i32(m3
);
1847 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1849 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1850 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1851 tcg_temp_free_i32(m3
);
1855 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1857 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1858 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1859 tcg_temp_free_i32(m3
);
1860 return_low128(o
->out2
);
1864 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1866 int r2
= get_field(s
->fields
, r2
);
1867 TCGv_i64 len
= tcg_temp_new_i64();
1869 potential_page_fault(s
);
1870 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1872 return_low128(o
->out
);
1874 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1875 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1876 tcg_temp_free_i64(len
);
1881 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1883 int l
= get_field(s
->fields
, l1
);
1888 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1889 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1892 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1893 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1896 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1897 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1900 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1901 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1904 potential_page_fault(s
);
1905 vl
= tcg_const_i32(l
);
1906 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1907 tcg_temp_free_i32(vl
);
1911 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1915 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1917 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1918 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1919 potential_page_fault(s
);
1920 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1921 tcg_temp_free_i32(r1
);
1922 tcg_temp_free_i32(r3
);
1927 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1929 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1930 TCGv_i32 t1
= tcg_temp_new_i32();
1931 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1932 potential_page_fault(s
);
1933 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1935 tcg_temp_free_i32(t1
);
1936 tcg_temp_free_i32(m3
);
1940 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1942 potential_page_fault(s
);
1943 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1945 return_low128(o
->in2
);
1949 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1951 TCGv_i64 t
= tcg_temp_new_i64();
1952 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1953 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1954 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1955 tcg_temp_free_i64(t
);
1959 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1961 int d2
= get_field(s
->fields
, d2
);
1962 int b2
= get_field(s
->fields
, b2
);
1965 /* Note that in1 = R3 (new value) and
1966 in2 = (zero-extended) R1 (expected value). */
1968 addr
= get_address(s
, 0, b2
, d2
);
1969 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1970 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1971 tcg_temp_free_i64(addr
);
1973 /* Are the memory and expected values (un)equal? Note that this setcond
1974 produces the output CC value, thus the NE sense of the test. */
1975 cc
= tcg_temp_new_i64();
1976 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1977 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1978 tcg_temp_free_i64(cc
);
1984 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1986 int r1
= get_field(s
->fields
, r1
);
1987 int r3
= get_field(s
->fields
, r3
);
1988 int d2
= get_field(s
->fields
, d2
);
1989 int b2
= get_field(s
->fields
, b2
);
1991 TCGv_i32 t_r1
, t_r3
;
1993 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1994 addr
= get_address(s
, 0, b2
, d2
);
1995 t_r1
= tcg_const_i32(r1
);
1996 t_r3
= tcg_const_i32(r3
);
1997 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
1998 tcg_temp_free_i64(addr
);
1999 tcg_temp_free_i32(t_r1
);
2000 tcg_temp_free_i32(t_r3
);
2006 #ifndef CONFIG_USER_ONLY
2007 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2009 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2010 check_privileged(s
);
2011 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
2012 tcg_temp_free_i32(r1
);
2018 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2020 TCGv_i64 t1
= tcg_temp_new_i64();
2021 TCGv_i32 t2
= tcg_temp_new_i32();
2022 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2023 gen_helper_cvd(t1
, t2
);
2024 tcg_temp_free_i32(t2
);
2025 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2026 tcg_temp_free_i64(t1
);
2030 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2032 int m3
= get_field(s
->fields
, m3
);
2033 TCGLabel
*lab
= gen_new_label();
2036 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2037 if (s
->insn
->data
) {
2038 c
= tcg_unsigned_cond(c
);
2040 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2049 #ifndef CONFIG_USER_ONLY
2050 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2052 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2053 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2054 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2056 check_privileged(s
);
2060 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2062 tcg_temp_free_i32(func_code
);
2063 tcg_temp_free_i32(r3
);
2064 tcg_temp_free_i32(r1
);
2069 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2071 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2072 return_low128(o
->out
);
2076 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2078 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2079 return_low128(o
->out
);
2083 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2085 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2086 return_low128(o
->out
);
2090 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2092 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2093 return_low128(o
->out
);
2097 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2099 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2103 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2105 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2109 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2111 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2112 return_low128(o
->out2
);
2116 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2118 int r2
= get_field(s
->fields
, r2
);
2119 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2123 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2125 /* No cache information provided. */
2126 tcg_gen_movi_i64(o
->out
, -1);
2130 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2132 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2136 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2138 int r1
= get_field(s
->fields
, r1
);
2139 int r2
= get_field(s
->fields
, r2
);
2140 TCGv_i64 t
= tcg_temp_new_i64();
2142 /* Note the "subsequently" in the PoO, which implies a defined result
2143 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2144 tcg_gen_shri_i64(t
, psw_mask
, 32);
2145 store_reg32_i64(r1
, t
);
2147 store_reg32_i64(r2
, psw_mask
);
2150 tcg_temp_free_i64(t
);
/* EXECUTE: defer to gen_helper_ex with the address of the next
   instruction.  NOTE(review): the extraction appears to have dropped this
   function's braces, local declarations and trailing statements; the code
   below is preserved byte-for-byte as found.  */
2154 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2156 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2157 tb->flags, (ab)use the tb->cs_base field as the address of
2158 the template in memory, and grab 8 bits of tb->flags/cflags for
2159 the contents of the register. We would then recognize all this
2160 in gen_intermediate_code_internal, generating code for exactly
2161 one instruction. This new TB then gets executed normally.
2163 On the other hand, this seems to be mostly used for modifying
2164 MVC inside of memcpy, which needs a helper call anyway. So
2165 perhaps this doesn't bear thinking about any further. */
/* Pass the address of the next instruction to the helper.  */
2172 tmp
= tcg_const_i64(s
->next_pc
);
2173 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2174 tcg_temp_free_i64(tmp
);
2179 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2181 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2182 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2183 tcg_temp_free_i32(m3
);
2187 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2189 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2190 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2191 tcg_temp_free_i32(m3
);
2195 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2197 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2198 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2199 return_low128(o
->out2
);
2200 tcg_temp_free_i32(m3
);
2204 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2206 /* We'll use the original input for cc computation, since we get to
2207 compare that against 0, which ought to be better than comparing
2208 the real output against 64. It also lets cc_dst be a convenient
2209 temporary during our computation. */
2210 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2212 /* R1 = IN ? CLZ(IN) : 64. */
2213 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2215 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2216 value by 64, which is undefined. But since the shift is 64 iff the
2217 input is zero, we still get the correct result after and'ing. */
2218 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2219 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2220 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2224 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2226 int m3
= get_field(s
->fields
, m3
);
2227 int pos
, len
, base
= s
->insn
->data
;
2228 TCGv_i64 tmp
= tcg_temp_new_i64();
2233 /* Effectively a 32-bit load. */
2234 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2241 /* Effectively a 16-bit load. */
2242 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2250 /* Effectively an 8-bit load. */
2251 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2256 pos
= base
+ ctz32(m3
) * 8;
2257 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2258 ccm
= ((1ull << len
) - 1) << pos
;
2262 /* This is going to be a sequence of loads and inserts. */
2263 pos
= base
+ 32 - 8;
2267 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2268 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2269 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2272 m3
= (m3
<< 1) & 0xf;
2278 tcg_gen_movi_i64(tmp
, ccm
);
2279 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2280 tcg_temp_free_i64(tmp
);
2284 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2286 int shift
= s
->insn
->data
& 0xff;
2287 int size
= s
->insn
->data
>> 8;
2288 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2292 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2297 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2299 t1
= tcg_temp_new_i64();
2300 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2301 tcg_gen_shri_i64(t1
, t1
, 36);
2302 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2304 tcg_gen_extu_i32_i64(t1
, cc_op
);
2305 tcg_gen_shli_i64(t1
, t1
, 28);
2306 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2307 tcg_temp_free_i64(t1
);
2311 #ifndef CONFIG_USER_ONLY
2312 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2314 check_privileged(s
);
2315 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2319 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2321 check_privileged(s
);
2322 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2327 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2329 /* The real output is indeed the original value in memory;
2330 recompute the addition for the computation of CC. */
2331 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2332 s
->insn
->data
| MO_ALIGN
);
2333 /* However, we need to recompute the addition for setting CC. */
2334 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2338 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2340 /* The real output is indeed the original value in memory;
2341 recompute the addition for the computation of CC. */
2342 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2343 s
->insn
->data
| MO_ALIGN
);
2344 /* However, we need to recompute the operation for setting CC. */
2345 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2349 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2351 /* The real output is indeed the original value in memory;
2352 recompute the addition for the computation of CC. */
2353 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2354 s
->insn
->data
| MO_ALIGN
);
2355 /* However, we need to recompute the operation for setting CC. */
2356 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2360 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2362 /* The real output is indeed the original value in memory;
2363 recompute the addition for the computation of CC. */
2364 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2365 s
->insn
->data
| MO_ALIGN
);
2366 /* However, we need to recompute the operation for setting CC. */
2367 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2371 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2373 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2377 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2379 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2383 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2385 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2389 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2391 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2395 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2397 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2398 return_low128(o
->out2
);
2402 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2404 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2405 return_low128(o
->out2
);
2409 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2411 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2415 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2417 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2421 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2423 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2427 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2429 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2433 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2435 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2439 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2441 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2445 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2447 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2451 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2453 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2457 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2459 TCGLabel
*lab
= gen_new_label();
2460 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2461 /* The value is stored even in case of trap. */
2462 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2468 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2470 TCGLabel
*lab
= gen_new_label();
2471 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2472 /* The value is stored even in case of trap. */
2473 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2479 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2481 TCGLabel
*lab
= gen_new_label();
2482 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2483 /* The value is stored even in case of trap. */
2484 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2490 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2492 TCGLabel
*lab
= gen_new_label();
2493 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2494 /* The value is stored even in case of trap. */
2495 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2501 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2503 TCGLabel
*lab
= gen_new_label();
2504 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2505 /* The value is stored even in case of trap. */
2506 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2512 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2516 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2519 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2523 TCGv_i32 t32
= tcg_temp_new_i32();
2526 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2529 t
= tcg_temp_new_i64();
2530 tcg_gen_extu_i32_i64(t
, t32
);
2531 tcg_temp_free_i32(t32
);
2533 z
= tcg_const_i64(0);
2534 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2535 tcg_temp_free_i64(t
);
2536 tcg_temp_free_i64(z
);
2542 #ifndef CONFIG_USER_ONLY
2543 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2545 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2546 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2547 check_privileged(s
);
2548 potential_page_fault(s
);
2549 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2550 tcg_temp_free_i32(r1
);
2551 tcg_temp_free_i32(r3
);
2555 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2557 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2558 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2559 check_privileged(s
);
2560 potential_page_fault(s
);
2561 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2562 tcg_temp_free_i32(r1
);
2563 tcg_temp_free_i32(r3
);
2567 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2569 check_privileged(s
);
2570 potential_page_fault(s
);
2571 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2576 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2578 check_privileged(s
);
2580 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2584 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2588 check_privileged(s
);
2589 per_breaking_event(s
);
2591 t1
= tcg_temp_new_i64();
2592 t2
= tcg_temp_new_i64();
2593 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2594 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2595 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2596 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2597 tcg_gen_shli_i64(t1
, t1
, 32);
2598 gen_helper_load_psw(cpu_env
, t1
, t2
);
2599 tcg_temp_free_i64(t1
);
2600 tcg_temp_free_i64(t2
);
2601 return EXIT_NORETURN
;
2604 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2608 check_privileged(s
);
2609 per_breaking_event(s
);
2611 t1
= tcg_temp_new_i64();
2612 t2
= tcg_temp_new_i64();
2613 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2614 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2615 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2616 gen_helper_load_psw(cpu_env
, t1
, t2
);
2617 tcg_temp_free_i64(t1
);
2618 tcg_temp_free_i64(t2
);
2619 return EXIT_NORETURN
;
2623 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2625 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2626 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2627 potential_page_fault(s
);
2628 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2629 tcg_temp_free_i32(r1
);
2630 tcg_temp_free_i32(r3
);
2634 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2636 int r1
= get_field(s
->fields
, r1
);
2637 int r3
= get_field(s
->fields
, r3
);
2640 /* Only one register to read. */
2641 t1
= tcg_temp_new_i64();
2642 if (unlikely(r1
== r3
)) {
2643 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2644 store_reg32_i64(r1
, t1
);
2649 /* First load the values of the first and last registers to trigger
2650 possible page faults. */
2651 t2
= tcg_temp_new_i64();
2652 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2653 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2654 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2655 store_reg32_i64(r1
, t1
);
2656 store_reg32_i64(r3
, t2
);
2658 /* Only two registers to read. */
2659 if (((r1
+ 1) & 15) == r3
) {
2665 /* Then load the remaining registers. Page fault can't occur. */
2667 tcg_gen_movi_i64(t2
, 4);
2670 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2671 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2672 store_reg32_i64(r1
, t1
);
2680 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2682 int r1
= get_field(s
->fields
, r1
);
2683 int r3
= get_field(s
->fields
, r3
);
2686 /* Only one register to read. */
2687 t1
= tcg_temp_new_i64();
2688 if (unlikely(r1
== r3
)) {
2689 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2690 store_reg32h_i64(r1
, t1
);
2695 /* First load the values of the first and last registers to trigger
2696 possible page faults. */
2697 t2
= tcg_temp_new_i64();
2698 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2699 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2700 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2701 store_reg32h_i64(r1
, t1
);
2702 store_reg32h_i64(r3
, t2
);
2704 /* Only two registers to read. */
2705 if (((r1
+ 1) & 15) == r3
) {
2711 /* Then load the remaining registers. Page fault can't occur. */
2713 tcg_gen_movi_i64(t2
, 4);
2716 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2717 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2718 store_reg32h_i64(r1
, t1
);
2726 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2728 int r1
= get_field(s
->fields
, r1
);
2729 int r3
= get_field(s
->fields
, r3
);
2732 /* Only one register to read. */
2733 if (unlikely(r1
== r3
)) {
2734 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2738 /* First load the values of the first and last registers to trigger
2739 possible page faults. */
2740 t1
= tcg_temp_new_i64();
2741 t2
= tcg_temp_new_i64();
2742 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2743 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2744 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2745 tcg_gen_mov_i64(regs
[r1
], t1
);
2748 /* Only two registers to read. */
2749 if (((r1
+ 1) & 15) == r3
) {
2754 /* Then load the remaining registers. Page fault can't occur. */
2756 tcg_gen_movi_i64(t1
, 8);
2759 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2760 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2767 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2770 TCGMemOp mop
= s
->insn
->data
;
2772 /* In a parallel context, stop the world and single step. */
2773 if (parallel_cpus
) {
2774 potential_page_fault(s
);
2775 gen_exception(EXCP_ATOMIC
);
2776 return EXIT_NORETURN
;
2779 /* In a serial context, perform the two loads ... */
2780 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2781 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2782 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2783 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2784 tcg_temp_free_i64(a1
);
2785 tcg_temp_free_i64(a2
);
2787 /* ... and indicate that we performed them while interlocked. */
2788 gen_op_movi_cc(s
, 0);
2792 #ifndef CONFIG_USER_ONLY
2793 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2795 check_privileged(s
);
2796 potential_page_fault(s
);
2797 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2801 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2803 check_privileged(s
);
2804 potential_page_fault(s
);
2805 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2810 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2813 o
->g_out
= o
->g_in2
;
2814 TCGV_UNUSED_I64(o
->in2
);
2819 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2821 int b2
= get_field(s
->fields
, b2
);
2822 TCGv ar1
= tcg_temp_new_i64();
2825 o
->g_out
= o
->g_in2
;
2826 TCGV_UNUSED_I64(o
->in2
);
2829 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2830 case PSW_ASC_PRIMARY
>> 32:
2831 tcg_gen_movi_i64(ar1
, 0);
2833 case PSW_ASC_ACCREG
>> 32:
2834 tcg_gen_movi_i64(ar1
, 1);
2836 case PSW_ASC_SECONDARY
>> 32:
2838 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2840 tcg_gen_movi_i64(ar1
, 0);
2843 case PSW_ASC_HOME
>> 32:
2844 tcg_gen_movi_i64(ar1
, 2);
2848 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2849 tcg_temp_free_i64(ar1
);
/* 128-bit move: alias the output pair (out:out2) to the input pair
   (in1:in2) without copying, transferring the "is a global" flags so
   the dispatcher's cleanup does not double-free.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE (MVC): copy l1+1 bytes from the second operand to the first,
   entirely via helper since lengths/overlap semantics are complex.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG (MVCL): helper updates the register pairs and sets cc_op,
   so latch the computed CC with set_cc_static.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED (MVCLE): as MVCL but with an explicit padding
   operand (in2) and an r3 register pair.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2899 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (MVCP): privileged cross-address-space copy; the key
   and length live in regs[r1] (note: r1 comes from the l1 field).  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY (MVCS): mirror of MVCP in the other direction.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE PAGE (MVPG): helper consumes the flags in regs[0] and sets
   cc_op; latch the CC afterwards.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING (MVST): the helper returns the updated first address in
   in1 and the updated second address in the low-128 slot (in2).  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* MULTIPLY: plain 64-bit low-half product.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY producing a 128-bit result: unsigned widening multiply,
   high half in out, low half in out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (short BFP).  All BFP arithmetic goes through helpers so
   that fpu_status exceptions are raised correctly.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short -> long BFP): widening multiply.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP).  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP): the 128-bit operand/result is split across
   out/out2; the low half comes back via return_low128.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long -> extended BFP): widening to 128 bits.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  The 32-bit
   float in f[r3] is widened into a temp i64 for the helper.  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP): the addend is read directly from the
   global fregs[r3].  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f[r3].  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* LOAD NEGATIVE (integer): out = -|in2|, branch-free via movcond:
   if in2 >= 0 pick the negated value, else in2 is already negative.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit float
   (kept in the low half of the i64).  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP): force bit 63.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign lives in the high doubleword
   (in1); the low doubleword passes through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* AND (NC): storage-to-storage AND of l1+1 bytes via helper; the
   helper computes the CC into cc_op.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* LOAD COMPLEMENT (integer): two's-complement negation.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the sign bit only.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP): flip bit 63.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): flip the sign in the high
   doubleword, copy the low doubleword through.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* OR (OC): storage-to-storage OR of l1+1 bytes; CC comes back from
   the helper in cc_op.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register/immediate forms): plain 64-bit OR; the cout hook
   attached to the insn computes the CC.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* OR IMMEDIATE (OIHH etc.): insn->data packs the target halfword's
   shift in the low byte and its size in bits in the next byte.  The
   immediate is shifted into position before ORing.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* POPULATION COUNT: per-byte popcount is done in the helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3116 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flushes the softmmu TLB via helper.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3125 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3127 int i3
= get_field(s
->fields
, i3
);
3128 int i4
= get_field(s
->fields
, i4
);
3129 int i5
= get_field(s
->fields
, i5
);
3130 int do_zero
= i4
& 0x80;
3131 uint64_t mask
, imask
, pmask
;
3134 /* Adjust the arguments for the specific insn. */
3135 switch (s
->fields
->op2
) {
3136 case 0x55: /* risbg */
3141 case 0x5d: /* risbhg */
3144 pmask
= 0xffffffff00000000ull
;
3146 case 0x51: /* risblg */
3149 pmask
= 0x00000000ffffffffull
;
3155 /* MASK is the set of bits to be inserted from R2.
3156 Take care for I3/I4 wraparound. */
3159 mask
^= pmask
>> i4
>> 1;
3161 mask
|= ~(pmask
>> i4
>> 1);
3165 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3166 insns, we need to keep the other half of the register. */
3167 imask
= ~mask
| ~pmask
;
3169 if (s
->fields
->op2
== 0x55) {
3179 if (s
->fields
->op2
== 0x5d) {
3183 /* In some cases we can implement this with extract. */
3184 if (imask
== 0 && pos
== 0 && len
> 0 && rot
+ len
<= 64) {
3185 tcg_gen_extract_i64(o
->out
, o
->in2
, rot
, len
);
3189 /* In some cases we can implement this with deposit. */
3190 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3191 /* Note that we rotate the bits to be inserted to the lsb, not to
3192 the position as described in the PoO. */
3193 rot
= (rot
- pos
) & 63;
3198 /* Rotate the input as necessary. */
3199 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3201 /* Insert the selected bits into the output. */
3204 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3206 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3208 } else if (imask
== 0) {
3209 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3211 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3212 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3213 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3218 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3220 int i3
= get_field(s
->fields
, i3
);
3221 int i4
= get_field(s
->fields
, i4
);
3222 int i5
= get_field(s
->fields
, i5
);
3225 /* If this is a test-only form, arrange to discard the result. */
3227 o
->out
= tcg_temp_new_i64();
3235 /* MASK is the set of bits to be operated on from R2.
3236 Take care for I3/I4 wraparound. */
3239 mask
^= ~0ull >> i4
>> 1;
3241 mask
|= ~(~0ull >> i4
>> 1);
3244 /* Rotate the input as necessary. */
3245 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3248 switch (s
->fields
->op2
) {
3249 case 0x55: /* AND */
3250 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3251 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3254 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3255 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3257 case 0x57: /* XOR */
3258 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3259 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3266 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3267 set_cc_nz_u64(s
, cc_dst
);
/* LOAD REVERSED: byte-swap a halfword, word or doubleword.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
/* ROTATE LEFT SINGLE LOGICAL (32-bit): narrow the 64-bit operands to
   i32 so the rotation wraps at 32 bits, then zero-extend the result
   back to i64.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3310 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper returns the old
   reference/change state in cc_op.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL (FAST): privileged; switching the address
   space invalidates cached translation state, so end the TB.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
3328 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3330 int sam
= s
->insn
->data
;
3346 /* Bizarre but true, we check the address of the current insn for the
3347 specification exception, not the next to be executed. Thus the PoO
3348 documents that Bad Things Happen two bytes before the end. */
3349 if (s
->pc
& ~mask
) {
3350 gen_program_exception(s
, PGM_SPECIFICATION
);
3351 return EXIT_NORETURN
;
3355 tsam
= tcg_const_i64(sam
);
3356 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3357 tcg_temp_free_i64(tsam
);
3359 /* Always exit the TB, since we (may have) changed execution mode. */
3360 return EXIT_PC_STALE
;
/* SET ACCESS (SAR): store the low 32 bits of in2 into access
   register r1 in the CPU state.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP): 128-bit operands split across the
   out/out2 and in1/in2 pairs; low result returned via low128.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3408 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper returns the CC in cc_op.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR (SIGP): privileged inter-CPU signalling; the
   order code / status handling happens in the helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3430 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3437 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3439 /* We want to store when the condition is fulfilled, so branch
3440 out when it's not */
3441 c
.cond
= tcg_invert_cond(c
.cond
);
3443 lab
= gen_new_label();
3445 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3447 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3451 r1
= get_field(s
->fields
, r1
);
3452 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3453 if (s
->insn
->data
) {
3454 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3456 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3458 tcg_temp_free_i64(a
);
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit index
   (31 or 63), which also selects the CC computation width.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* CC must be computed from the pre-shift operands.  */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: install a new floating-point control register; the helper
   also updates the softfloat rounding mode.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: as SFPC but may raise a simulated IEEE
   exception, handled in the helper.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3508 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3510 int b2
= get_field(s
->fields
, b2
);
3511 int d2
= get_field(s
->fields
, d2
);
3512 TCGv_i64 t1
= tcg_temp_new_i64();
3513 TCGv_i64 t2
= tcg_temp_new_i64();
3516 switch (s
->fields
->op2
) {
3517 case 0x99: /* SRNM */
3520 case 0xb8: /* SRNMB */
3523 case 0xb9: /* SRNMT */
3529 mask
= (1 << len
) - 1;
3531 /* Insert the value into the appropriate field of the FPC. */
3533 tcg_gen_movi_i64(t1
, d2
& mask
);
3535 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3536 tcg_gen_andi_i64(t1
, t1
, mask
);
3538 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3539 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3540 tcg_temp_free_i64(t1
);
3542 /* Then install the new FPC to set the rounding mode in fpu_status. */
3543 gen_helper_sfpc(cpu_env
, t2
);
3544 tcg_temp_free_i64(t2
);
3548 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits 24-27 of the operand
   become the PSW key field.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
/* SET STORAGE KEY EXTENDED: privileged; key update done in helper.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace PSW bits 0-7 with the byte
   loaded in in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
/* STORE CPU ADDRESS: privileged.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested.  */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
/* STORE CLOCK: read the TOD clock via helper into out; the generic
   store hook writes it to memory.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* STORE CLOCK EXTENDED: store the clock as a 128-bit value at in2.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR: privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged; value comes back in out and is
   stored by the generic store hook.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE CONTROL (64-bit, STCTG): privileged; store control registers
   r1 through r3 at the address in in2.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit, STCTL): as STCTG but word-sized.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CPU ID: privileged; pack cpu_num (low word) and machine_type
   (high word) into the doubleword result.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* SET CPU TIMER: privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST: privileged; writes the facility bits to the
   fixed low-core location inside the helper.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged; result stored via the out hook.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE SYSTEM INFORMATION: privileged; function code in regs[0],
   selectors in regs[1]; CC comes back via cc_op.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* Channel-subsystem instructions.  All follow the same pattern: they
   are privileged, the subchannel id is (implicitly) in regs[1], the
   helper performs the operation and returns the CC in cc_op.  */

/* CANCEL SUBCHANNEL.  */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL.  */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL.  */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL: in2 addresses the SCHIB operand.  */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH.  */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL.  */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* START SUBCHANNEL: in2 addresses the ORB operand.  */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL: in2 addresses the SCHIB destination.  */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL: in2 addresses the IRB destination.  */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL: in2 addresses the command block.  */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX: privileged; mask to the architected prefix bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN {AND,OR} SYSTEM MASK (STNSM op 0xac / STOSM): privileged.
   Shared translator; s->fields->op distinguishes AND from OR.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (32-bit): privileged; store in1 to the
   real address in in2 via helper.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit).  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE FACILITY LIST EXTENDED: not privileged; count in regs[0],
   CC comes back via cc_op.  */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE, by operand size: write in1 to the address in in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: store access registers r1 through r3 (with
   wraparound) at the address in in2, done in the helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3881 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3883 int m3
= get_field(s
->fields
, m3
);
3884 int pos
, base
= s
->insn
->data
;
3885 TCGv_i64 tmp
= tcg_temp_new_i64();
3887 pos
= base
+ ctz32(m3
) * 8;
3890 /* Effectively a 32-bit store. */
3891 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3892 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3898 /* Effectively a 16-bit store. */
3899 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3900 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3907 /* Effectively an 8-bit store. */
3908 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3909 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3913 /* This is going to be a sequence of shifts and stores. */
3914 pos
= base
+ 32 - 8;
3917 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3918 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3919 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3921 m3
= (m3
<< 1) & 0xf;
3926 tcg_temp_free_i64(tmp
);
3930 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3932 int r1
= get_field(s
->fields
, r1
);
3933 int r3
= get_field(s
->fields
, r3
);
3934 int size
= s
->insn
->data
;
3935 TCGv_i64 tsize
= tcg_const_i64(size
);
3939 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3941 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3946 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3950 tcg_temp_free_i64(tsize
);
3954 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3956 int r1
= get_field(s
->fields
, r1
);
3957 int r3
= get_field(s
->fields
, r3
);
3958 TCGv_i64 t
= tcg_temp_new_i64();
3959 TCGv_i64 t4
= tcg_const_i64(4);
3960 TCGv_i64 t32
= tcg_const_i64(32);
3963 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3964 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3968 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3972 tcg_temp_free_i64(t
);
3973 tcg_temp_free_i64(t4
);
3974 tcg_temp_free_i64(t32
);
/* SEARCH STRING: terminator byte in regs[0]; the helper returns the
   updated r1 in in1 and the updated r2 via the low-128 slot; CC in
   cc_op.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* SUBTRACT (integer): CC handled by the insn's cout hook.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3993 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3998 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4000 /* The !borrow flag is the msb of CC. Since we want the inverse of
4001 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4002 disas_jcc(s
, &cmp
, 8 | 4);
4003 borrow
= tcg_temp_new_i64();
4005 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4007 TCGv_i32 t
= tcg_temp_new_i32();
4008 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4009 tcg_gen_extu_i32_i64(borrow
, t
);
4010 tcg_temp_free_i32(t
);
4014 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4015 tcg_temp_free_i64(borrow
);
4019 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4026 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4027 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4028 tcg_temp_free_i32(t
);
4030 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
4031 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4032 tcg_temp_free_i32(t
);
4034 gen_exception(EXCP_SVC
);
4035 return EXIT_NORETURN
;
/* TEST DATA CLASS (short BFP): class mask in in2, CC from helper.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP).  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): 128-bit value in out:out2.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4059 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: probe access to addr1 with the key in in2; the
   helper returns the CC.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* TRANSLATE (TR): translate l1+1 bytes at addr1 through the table at
   in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED (TRE): operand addresses/lengths updated by the
   helper; low half of the 128-bit result returned via low128.  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST (TRT): CC from helper.  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK (UNPK): decimal unpack done entirely in the helper; no CC.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
4107 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4109 int d1
= get_field(s
->fields
, d1
);
4110 int d2
= get_field(s
->fields
, d2
);
4111 int b1
= get_field(s
->fields
, b1
);
4112 int b2
= get_field(s
->fields
, b2
);
4113 int l
= get_field(s
->fields
, l1
);
4116 o
->addr1
= get_address(s
, 0, b1
, d1
);
4118 /* If the addresses are identical, this is a store/memset of zero. */
4119 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4120 o
->in2
= tcg_const_i64(0);
4124 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4127 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4131 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4134 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4138 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4141 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4145 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4147 gen_op_movi_cc(s
, 0);
4151 /* But in general we'll defer to a helper. */
4152 o
->in2
= get_address(s
, 0, b2
, d2
);
4153 t32
= tcg_const_i32(l
);
4154 potential_page_fault(s
);
4155 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4156 tcg_temp_free_i32(t32
);
/* EXCLUSIVE OR (register/immediate forms): CC via cout hook.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* EXCLUSIVE OR IMMEDIATE (XIHF etc.): insn->data packs the field's
   shift (low byte) and size in bits (next byte), mirroring op_ori.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Produce a zero in out (used e.g. for LZER/LZDR-style loads).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a 128-bit zero in out:out2 (e.g. LZXR).  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
4197 /* ====================================================================== */
4198 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4199 the original inputs), update the various cc data structures in order to
4200 be able to compute the new condition code. */
/* Each cout_* helper records the CC operation and its source values
   (cc_src/cc_dst/cc_vr via the gen_op_update*_cc_i64 wrappers) so the
   condition code can be computed lazily later.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed/unsigned/carry flavors of addition keep all three values
   (both inputs and the result) so overflow/carry can be derived.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Comparisons: signed (LTGT) and unsigned (LTUGTU).  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* BFP results: CC from the sign/zero/NaN class of the value.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Non-zero tests; the 32-bit form masks the result first so high
   garbage in the i64 does not affect the CC.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Sign tests against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Subtraction flavors, mirroring the addition ones above.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* TEST UNDER MASK: keep operand and mask for lazy evaluation.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}
4353 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4355 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4358 /* ====================================================================== */
4359 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4360 with the TCG register to which we will write. Used in combination with
4361 the "wout" generators, in some cases we need a new temporary, and in
4362 some cases we can write to a TCG global. */
4364 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4366 o
->out
= tcg_temp_new_i64();
4368 #define SPEC_prep_new 0
4370 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4372 o
->out
= tcg_temp_new_i64();
4373 o
->out2
= tcg_temp_new_i64();
4375 #define SPEC_prep_new_P 0
4377 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4379 o
->out
= regs
[get_field(f
, r1
)];
4382 #define SPEC_prep_r1 0
4384 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4386 int r1
= get_field(f
, r1
);
4388 o
->out2
= regs
[r1
+ 1];
4389 o
->g_out
= o
->g_out2
= true;
4391 #define SPEC_prep_r1_P SPEC_r1_even
4393 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4395 o
->out
= fregs
[get_field(f
, r1
)];
4398 #define SPEC_prep_f1 0
4400 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4402 int r1
= get_field(f
, r1
);
4404 o
->out2
= fregs
[r1
+ 2];
4405 o
->g_out
= o
->g_out2
= true;
4407 #define SPEC_prep_x1 SPEC_r1_f128
4409 /* ====================================================================== */
4410 /* The "Write OUTput" generators. These generally perform some non-trivial
4411 copy of data to TCG globals, or to main memory. The trivial cases are
4412 generally handled by having a "prep" generator install the TCG global
4413 as the destination of the operation. */
4415 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4417 store_reg(get_field(f
, r1
), o
->out
);
4419 #define SPEC_wout_r1 0
4421 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4423 int r1
= get_field(f
, r1
);
4424 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4426 #define SPEC_wout_r1_8 0
4428 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4430 int r1
= get_field(f
, r1
);
4431 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4433 #define SPEC_wout_r1_16 0
4435 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4437 store_reg32_i64(get_field(f
, r1
), o
->out
);
4439 #define SPEC_wout_r1_32 0
4441 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4443 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4445 #define SPEC_wout_r1_32h 0
4447 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4449 int r1
= get_field(f
, r1
);
4450 store_reg32_i64(r1
, o
->out
);
4451 store_reg32_i64(r1
+ 1, o
->out2
);
4453 #define SPEC_wout_r1_P32 SPEC_r1_even
4455 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4457 int r1
= get_field(f
, r1
);
4458 store_reg32_i64(r1
+ 1, o
->out
);
4459 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4460 store_reg32_i64(r1
, o
->out
);
4462 #define SPEC_wout_r1_D32 SPEC_r1_even
4464 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4466 int r3
= get_field(f
, r3
);
4467 store_reg32_i64(r3
, o
->out
);
4468 store_reg32_i64(r3
+ 1, o
->out2
);
4470 #define SPEC_wout_r3_P32 SPEC_r3_even
4472 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4474 int r3
= get_field(f
, r3
);
4475 store_reg(r3
, o
->out
);
4476 store_reg(r3
+ 1, o
->out2
);
4478 #define SPEC_wout_r3_P64 SPEC_r3_even
4480 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4482 store_freg32_i64(get_field(f
, r1
), o
->out
);
4484 #define SPEC_wout_e1 0
4486 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4488 store_freg(get_field(f
, r1
), o
->out
);
4490 #define SPEC_wout_f1 0
4492 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4494 int f1
= get_field(s
->fields
, r1
);
4495 store_freg(f1
, o
->out
);
4496 store_freg(f1
+ 2, o
->out2
);
4498 #define SPEC_wout_x1 SPEC_r1_f128
4500 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4502 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4503 store_reg32_i64(get_field(f
, r1
), o
->out
);
4506 #define SPEC_wout_cond_r1r2_32 0
4508 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4510 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4511 store_freg32_i64(get_field(f
, r1
), o
->out
);
4514 #define SPEC_wout_cond_e1e2 0
4516 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4518 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4520 #define SPEC_wout_m1_8 0
4522 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4524 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4526 #define SPEC_wout_m1_16 0
4528 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4530 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4532 #define SPEC_wout_m1_32 0
4534 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4536 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4538 #define SPEC_wout_m1_64 0
4540 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4542 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4544 #define SPEC_wout_m2_32 0
4546 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4548 store_reg(get_field(f
, r1
), o
->in2
);
4550 #define SPEC_wout_in2_r1 0
4552 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4554 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4556 #define SPEC_wout_in2_r1_32 0
4558 /* ====================================================================== */
4559 /* The "INput 1" generators. These load the first operand to an insn. */
4561 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4563 o
->in1
= load_reg(get_field(f
, r1
));
4565 #define SPEC_in1_r1 0
4567 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4569 o
->in1
= regs
[get_field(f
, r1
)];
4572 #define SPEC_in1_r1_o 0
4574 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4576 o
->in1
= tcg_temp_new_i64();
4577 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4579 #define SPEC_in1_r1_32s 0
4581 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4583 o
->in1
= tcg_temp_new_i64();
4584 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4586 #define SPEC_in1_r1_32u 0
4588 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4590 o
->in1
= tcg_temp_new_i64();
4591 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4593 #define SPEC_in1_r1_sr32 0
4595 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4597 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4599 #define SPEC_in1_r1p1 SPEC_r1_even
4601 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4603 o
->in1
= tcg_temp_new_i64();
4604 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4606 #define SPEC_in1_r1p1_32s SPEC_r1_even
4608 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4610 o
->in1
= tcg_temp_new_i64();
4611 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4613 #define SPEC_in1_r1p1_32u SPEC_r1_even
4615 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4617 int r1
= get_field(f
, r1
);
4618 o
->in1
= tcg_temp_new_i64();
4619 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4621 #define SPEC_in1_r1_D32 SPEC_r1_even
4623 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4625 o
->in1
= load_reg(get_field(f
, r2
));
4627 #define SPEC_in1_r2 0
4629 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4631 o
->in1
= tcg_temp_new_i64();
4632 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4634 #define SPEC_in1_r2_sr32 0
4636 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4638 o
->in1
= load_reg(get_field(f
, r3
));
4640 #define SPEC_in1_r3 0
4642 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4644 o
->in1
= regs
[get_field(f
, r3
)];
4647 #define SPEC_in1_r3_o 0
4649 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4651 o
->in1
= tcg_temp_new_i64();
4652 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4654 #define SPEC_in1_r3_32s 0
4656 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4658 o
->in1
= tcg_temp_new_i64();
4659 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4661 #define SPEC_in1_r3_32u 0
4663 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4665 int r3
= get_field(f
, r3
);
4666 o
->in1
= tcg_temp_new_i64();
4667 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4669 #define SPEC_in1_r3_D32 SPEC_r3_even
4671 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4673 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4675 #define SPEC_in1_e1 0
4677 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4679 o
->in1
= fregs
[get_field(f
, r1
)];
4682 #define SPEC_in1_f1_o 0
4684 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4686 int r1
= get_field(f
, r1
);
4688 o
->out2
= fregs
[r1
+ 2];
4689 o
->g_out
= o
->g_out2
= true;
4691 #define SPEC_in1_x1_o SPEC_r1_f128
4693 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4695 o
->in1
= fregs
[get_field(f
, r3
)];
4698 #define SPEC_in1_f3_o 0
4700 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4702 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4704 #define SPEC_in1_la1 0
4706 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4708 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4709 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4711 #define SPEC_in1_la2 0
4713 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4716 o
->in1
= tcg_temp_new_i64();
4717 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4719 #define SPEC_in1_m1_8u 0
4721 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4724 o
->in1
= tcg_temp_new_i64();
4725 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4727 #define SPEC_in1_m1_16s 0
4729 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4732 o
->in1
= tcg_temp_new_i64();
4733 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4735 #define SPEC_in1_m1_16u 0
4737 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4740 o
->in1
= tcg_temp_new_i64();
4741 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4743 #define SPEC_in1_m1_32s 0
4745 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4748 o
->in1
= tcg_temp_new_i64();
4749 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4751 #define SPEC_in1_m1_32u 0
4753 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4756 o
->in1
= tcg_temp_new_i64();
4757 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4759 #define SPEC_in1_m1_64 0
4761 /* ====================================================================== */
4762 /* The "INput 2" generators. These load the second operand to an insn. */
4764 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4766 o
->in2
= regs
[get_field(f
, r1
)];
4769 #define SPEC_in2_r1_o 0
4771 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4773 o
->in2
= tcg_temp_new_i64();
4774 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4776 #define SPEC_in2_r1_16u 0
4778 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4780 o
->in2
= tcg_temp_new_i64();
4781 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4783 #define SPEC_in2_r1_32u 0
4785 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4787 int r1
= get_field(f
, r1
);
4788 o
->in2
= tcg_temp_new_i64();
4789 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4791 #define SPEC_in2_r1_D32 SPEC_r1_even
4793 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4795 o
->in2
= load_reg(get_field(f
, r2
));
4797 #define SPEC_in2_r2 0
4799 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4801 o
->in2
= regs
[get_field(f
, r2
)];
4804 #define SPEC_in2_r2_o 0
4806 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4808 int r2
= get_field(f
, r2
);
4810 o
->in2
= load_reg(r2
);
4813 #define SPEC_in2_r2_nz 0
4815 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4817 o
->in2
= tcg_temp_new_i64();
4818 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4820 #define SPEC_in2_r2_8s 0
4822 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4824 o
->in2
= tcg_temp_new_i64();
4825 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4827 #define SPEC_in2_r2_8u 0
4829 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4831 o
->in2
= tcg_temp_new_i64();
4832 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4834 #define SPEC_in2_r2_16s 0
4836 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4838 o
->in2
= tcg_temp_new_i64();
4839 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4841 #define SPEC_in2_r2_16u 0
4843 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4845 o
->in2
= load_reg(get_field(f
, r3
));
4847 #define SPEC_in2_r3 0
4849 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4851 o
->in2
= tcg_temp_new_i64();
4852 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
4854 #define SPEC_in2_r3_sr32 0
4856 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4858 o
->in2
= tcg_temp_new_i64();
4859 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4861 #define SPEC_in2_r2_32s 0
4863 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4865 o
->in2
= tcg_temp_new_i64();
4866 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4868 #define SPEC_in2_r2_32u 0
4870 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4872 o
->in2
= tcg_temp_new_i64();
4873 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
4875 #define SPEC_in2_r2_sr32 0
4877 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4879 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4881 #define SPEC_in2_e2 0
4883 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4885 o
->in2
= fregs
[get_field(f
, r2
)];
4888 #define SPEC_in2_f2_o 0
4890 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4892 int r2
= get_field(f
, r2
);
4894 o
->in2
= fregs
[r2
+ 2];
4895 o
->g_in1
= o
->g_in2
= true;
4897 #define SPEC_in2_x2_o SPEC_r2_f128
4899 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4901 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4903 #define SPEC_in2_ra2 0
4905 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4907 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4908 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4910 #define SPEC_in2_a2 0
4912 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4914 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4916 #define SPEC_in2_ri2 0
4918 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4920 help_l2_shift(s
, f
, o
, 31);
4922 #define SPEC_in2_sh32 0
4924 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4926 help_l2_shift(s
, f
, o
, 63);
4928 #define SPEC_in2_sh64 0
4930 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4933 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4935 #define SPEC_in2_m2_8u 0
4937 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4940 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4942 #define SPEC_in2_m2_16s 0
4944 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4947 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4949 #define SPEC_in2_m2_16u 0
4951 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4954 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4956 #define SPEC_in2_m2_32s 0
4958 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4961 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4963 #define SPEC_in2_m2_32u 0
4965 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4968 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4970 #define SPEC_in2_m2_64 0
4972 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4975 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4977 #define SPEC_in2_mri2_16u 0
4979 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4982 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4984 #define SPEC_in2_mri2_32s 0
4986 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4989 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4991 #define SPEC_in2_mri2_32u 0
4993 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4996 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4998 #define SPEC_in2_mri2_64 0
5000 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5002 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5004 #define SPEC_in2_i2 0
5006 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5008 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5010 #define SPEC_in2_i2_8u 0
5012 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5014 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5016 #define SPEC_in2_i2_16u 0
5018 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5020 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5022 #define SPEC_in2_i2_32u 0
5024 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5026 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5027 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5029 #define SPEC_in2_i2_16u_shl 0
5031 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5033 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5034 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5036 #define SPEC_in2_i2_32u_shl 0
5038 #ifndef CONFIG_USER_ONLY
5039 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5041 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5043 #define SPEC_in2_insn 0
5046 /* ====================================================================== */
5048 /* Find opc within the table of insns. This is formulated as a switch
5049 statement so that (1) we get compile-time notice of cut-paste errors
5050 for duplicated opcodes, and (2) the compiler generates the binary
5051 search tree, rather than us having to post-process the table. */
5053 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5054 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5056 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5058 enum DisasInsnEnum
{
5059 #include "insn-data.def"
5063 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5067 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5069 .help_in1 = in1_##I1, \
5070 .help_in2 = in2_##I2, \
5071 .help_prep = prep_##P, \
5072 .help_wout = wout_##W, \
5073 .help_cout = cout_##CC, \
5074 .help_op = op_##OP, \
5078 /* Allow 0 to be used for NULL in the table below. */
5086 #define SPEC_in1_0 0
5087 #define SPEC_in2_0 0
5088 #define SPEC_prep_0 0
5089 #define SPEC_wout_0 0
5091 static const DisasInsn insn_info
[] = {
5092 #include "insn-data.def"
5096 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5097 case OPC: return &insn_info[insn_ ## NM];
5099 static const DisasInsn
*lookup_opc(uint16_t opc
)
5102 #include "insn-data.def"
5111 /* Extract a field from the insn. The INSN should be left-aligned in
5112 the uint64_t so that we can more easily utilize the big-bit-endian
5113 definitions we extract from the Principals of Operation. */
5115 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5123 /* Zero extract the field from the insn. */
5124 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5126 /* Sign-extend, or un-swap the field as necessary. */
5128 case 0: /* unsigned */
5130 case 1: /* signed */
5131 assert(f
->size
<= 32);
5132 m
= 1u << (f
->size
- 1);
5135 case 2: /* dl+dh split, signed 20 bit. */
5136 r
= ((int8_t)r
<< 12) | (r
>> 8);
5142 /* Validate that the "compressed" encoding we selected above is valid.
5143 I.e. we havn't make two different original fields overlap. */
5144 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5145 o
->presentC
|= 1 << f
->indexC
;
5146 o
->presentO
|= 1 << f
->indexO
;
5148 o
->c
[f
->indexC
] = r
;
5151 /* Lookup the insn at the current PC, extracting the operands into O and
5152 returning the info struct for the insn. Returns NULL for invalid insn. */
5154 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5157 uint64_t insn
, pc
= s
->pc
;
5159 const DisasInsn
*info
;
5161 insn
= ld_code2(env
, pc
);
5162 op
= (insn
>> 8) & 0xff;
5163 ilen
= get_ilen(op
);
5164 s
->next_pc
= s
->pc
+ ilen
;
5171 insn
= ld_code4(env
, pc
) << 32;
5174 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5180 /* We can't actually determine the insn format until we've looked up
5181 the full insn opcode. Which we can't do without locating the
5182 secondary opcode. Assume by default that OP2 is at bit 40; for
5183 those smaller insns that don't actually have a secondary opcode
5184 this will correctly result in OP2 = 0. */
5190 case 0xb2: /* S, RRF, RRE */
5191 case 0xb3: /* RRE, RRD, RRF */
5192 case 0xb9: /* RRE, RRF */
5193 case 0xe5: /* SSE, SIL */
5194 op2
= (insn
<< 8) >> 56;
5198 case 0xc0: /* RIL */
5199 case 0xc2: /* RIL */
5200 case 0xc4: /* RIL */
5201 case 0xc6: /* RIL */
5202 case 0xc8: /* SSF */
5203 case 0xcc: /* RIL */
5204 op2
= (insn
<< 12) >> 60;
5206 case 0xd0 ... 0xdf: /* SS */
5212 case 0xee ... 0xf3: /* SS */
5213 case 0xf8 ... 0xfd: /* SS */
5217 op2
= (insn
<< 40) >> 56;
5221 memset(f
, 0, sizeof(*f
));
5226 /* Lookup the instruction. */
5227 info
= lookup_opc(op
<< 8 | op2
);
5229 /* If we found it, extract the operands. */
5231 DisasFormat fmt
= info
->fmt
;
5234 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5235 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5241 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5243 const DisasInsn
*insn
;
5244 ExitStatus ret
= NO_EXIT
;
5248 /* Search for the insn in the table. */
5249 insn
= extract_insn(env
, s
, &f
);
5251 /* Not found means unimplemented/illegal opcode. */
5253 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5255 gen_illegal_opcode(s
);
5256 return EXIT_NORETURN
;
5259 #ifndef CONFIG_USER_ONLY
5260 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5261 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5262 gen_helper_per_ifetch(cpu_env
, addr
);
5263 tcg_temp_free_i64(addr
);
5267 /* Check for insn specification exceptions. */
5269 int spec
= insn
->spec
, excp
= 0, r
;
5271 if (spec
& SPEC_r1_even
) {
5272 r
= get_field(&f
, r1
);
5274 excp
= PGM_SPECIFICATION
;
5277 if (spec
& SPEC_r2_even
) {
5278 r
= get_field(&f
, r2
);
5280 excp
= PGM_SPECIFICATION
;
5283 if (spec
& SPEC_r3_even
) {
5284 r
= get_field(&f
, r3
);
5286 excp
= PGM_SPECIFICATION
;
5289 if (spec
& SPEC_r1_f128
) {
5290 r
= get_field(&f
, r1
);
5292 excp
= PGM_SPECIFICATION
;
5295 if (spec
& SPEC_r2_f128
) {
5296 r
= get_field(&f
, r2
);
5298 excp
= PGM_SPECIFICATION
;
5302 gen_program_exception(s
, excp
);
5303 return EXIT_NORETURN
;
5307 /* Set up the strutures we use to communicate with the helpers. */
5310 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5311 TCGV_UNUSED_I64(o
.out
);
5312 TCGV_UNUSED_I64(o
.out2
);
5313 TCGV_UNUSED_I64(o
.in1
);
5314 TCGV_UNUSED_I64(o
.in2
);
5315 TCGV_UNUSED_I64(o
.addr1
);
5317 /* Implement the instruction. */
5318 if (insn
->help_in1
) {
5319 insn
->help_in1(s
, &f
, &o
);
5321 if (insn
->help_in2
) {
5322 insn
->help_in2(s
, &f
, &o
);
5324 if (insn
->help_prep
) {
5325 insn
->help_prep(s
, &f
, &o
);
5327 if (insn
->help_op
) {
5328 ret
= insn
->help_op(s
, &o
);
5330 if (insn
->help_wout
) {
5331 insn
->help_wout(s
, &f
, &o
);
5333 if (insn
->help_cout
) {
5334 insn
->help_cout(s
, &o
);
5337 /* Free any temporaries created by the helpers. */
5338 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5339 tcg_temp_free_i64(o
.out
);
5341 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5342 tcg_temp_free_i64(o
.out2
);
5344 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5345 tcg_temp_free_i64(o
.in1
);
5347 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5348 tcg_temp_free_i64(o
.in2
);
5350 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5351 tcg_temp_free_i64(o
.addr1
);
5354 #ifndef CONFIG_USER_ONLY
5355 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5356 /* An exception might be triggered, save PSW if not already done. */
5357 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5358 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5364 /* Call the helper to check for a possible PER exception. */
5365 gen_helper_per_check_exception(cpu_env
);
5369 /* Advance to the next instruction. */
5374 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5376 S390CPU
*cpu
= s390_env_get_cpu(env
);
5377 CPUState
*cs
= CPU(cpu
);
5379 target_ulong pc_start
;
5380 uint64_t next_page_start
;
5381 int num_insns
, max_insns
;
5388 if (!(tb
->flags
& FLAG_MASK_64
)) {
5389 pc_start
&= 0x7fffffff;
5394 dc
.cc_op
= CC_OP_DYNAMIC
;
5395 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5397 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5400 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5401 if (max_insns
== 0) {
5402 max_insns
= CF_COUNT_MASK
;
5404 if (max_insns
> TCG_MAX_INSNS
) {
5405 max_insns
= TCG_MAX_INSNS
;
5411 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5414 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5415 status
= EXIT_PC_STALE
;
5417 /* The address covered by the breakpoint must be included in
5418 [tb->pc, tb->pc + tb->size) in order to for it to be
5419 properly cleared -- thus we increment the PC here so that
5420 the logic setting tb->size below does the right thing. */
5425 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5430 if (status
== NO_EXIT
) {
5431 status
= translate_one(env
, &dc
);
5434 /* If we reach a page boundary, are single stepping,
5435 or exhaust instruction count, stop generation. */
5436 if (status
== NO_EXIT
5437 && (dc
.pc
>= next_page_start
5438 || tcg_op_buf_full()
5439 || num_insns
>= max_insns
5441 || cs
->singlestep_enabled
)) {
5442 status
= EXIT_PC_STALE
;
5444 } while (status
== NO_EXIT
);
5446 if (tb
->cflags
& CF_LAST_IO
) {
5455 update_psw_addr(&dc
);
5457 case EXIT_PC_UPDATED
:
5458 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5459 cc op type is in env */
5461 /* Exit the TB, either by raising a debug exception or by return. */
5463 gen_exception(EXCP_DEBUG
);
5472 gen_tb_end(tb
, num_insns
);
5474 tb
->size
= dc
.pc
- pc_start
;
5475 tb
->icount
= num_insns
;
5477 #if defined(S390X_DEBUG_DISAS)
5478 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5479 && qemu_log_in_addr_range(pc_start
)) {
5481 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5482 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5489 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5492 int cc_op
= data
[1];
5493 env
->psw
.addr
= data
[0];
5494 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {