4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
62 bool singlestep_enabled
;
65 /* Information carried about a condition to be evaluated. */
72 struct { TCGv_i64 a
, b
; } s64
;
73 struct { TCGv_i32 a
, b
; } s32
;
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
97 S390CPU
*cpu
= S390_CPU(cs
);
98 CPUS390XState
*env
= &cpu
->env
;
101 if (env
->cc_op
> 3) {
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
103 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
105 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
106 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
109 for (i
= 0; i
< 16; i
++) {
110 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
112 cpu_fprintf(f
, "\n");
118 for (i
= 0; i
< 16; i
++) {
119 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
121 cpu_fprintf(f
, "\n");
127 for (i
= 0; i
< 32; i
++) {
128 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
129 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
130 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i
= 0; i
< 16; i
++) {
135 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
137 cpu_fprintf(f
, "\n");
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i
= 0; i
< CC_OP_MAX
; i
++) {
146 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
147 inline_branch_miss
[i
], inline_branch_hit
[i
]);
151 cpu_fprintf(f
, "\n");
154 static TCGv_i64 psw_addr
;
155 static TCGv_i64 psw_mask
;
156 static TCGv_i64 gbea
;
158 static TCGv_i32 cc_op
;
159 static TCGv_i64 cc_src
;
160 static TCGv_i64 cc_dst
;
161 static TCGv_i64 cc_vr
;
163 static char cpu_reg_names
[32][4];
164 static TCGv_i64 regs
[16];
165 static TCGv_i64 fregs
[16];
167 void s390x_translate_init(void)
171 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
172 tcg_ctx
.tcg_env
= cpu_env
;
173 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
174 offsetof(CPUS390XState
, psw
.addr
),
176 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
177 offsetof(CPUS390XState
, psw
.mask
),
179 gbea
= tcg_global_mem_new_i64(cpu_env
,
180 offsetof(CPUS390XState
, gbea
),
183 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
185 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
187 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
189 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
192 for (i
= 0; i
< 16; i
++) {
193 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
194 regs
[i
] = tcg_global_mem_new(cpu_env
,
195 offsetof(CPUS390XState
, regs
[i
]),
199 for (i
= 0; i
< 16; i
++) {
200 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
201 fregs
[i
] = tcg_global_mem_new(cpu_env
,
202 offsetof(CPUS390XState
, vregs
[i
][0].d
),
203 cpu_reg_names
[i
+ 16]);
207 static TCGv_i64
load_reg(int reg
)
209 TCGv_i64 r
= tcg_temp_new_i64();
210 tcg_gen_mov_i64(r
, regs
[reg
]);
214 static TCGv_i64
load_freg32_i64(int reg
)
216 TCGv_i64 r
= tcg_temp_new_i64();
217 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
221 static void store_reg(int reg
, TCGv_i64 v
)
223 tcg_gen_mov_i64(regs
[reg
], v
);
226 static void store_freg(int reg
, TCGv_i64 v
)
228 tcg_gen_mov_i64(fregs
[reg
], v
);
231 static void store_reg32_i64(int reg
, TCGv_i64 v
)
233 /* 32 bit register writes keep the upper half */
234 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
237 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
239 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
242 static void store_freg32_i64(int reg
, TCGv_i64 v
)
244 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
247 static void return_low128(TCGv_i64 dest
)
249 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
252 static void update_psw_addr(DisasContext
*s
)
255 tcg_gen_movi_i64(psw_addr
, s
->pc
);
258 static void per_branch(DisasContext
*s
, bool to_next
)
260 #ifndef CONFIG_USER_ONLY
261 tcg_gen_movi_i64(gbea
, s
->pc
);
263 if (s
->tb
->flags
& FLAG_MASK_PER
) {
264 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
265 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
267 tcg_temp_free_i64(next_pc
);
273 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
274 TCGv_i64 arg1
, TCGv_i64 arg2
)
276 #ifndef CONFIG_USER_ONLY
277 if (s
->tb
->flags
& FLAG_MASK_PER
) {
278 TCGLabel
*lab
= gen_new_label();
279 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
281 tcg_gen_movi_i64(gbea
, s
->pc
);
282 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
286 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
287 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
288 tcg_temp_free_i64(pc
);
293 static void per_breaking_event(DisasContext
*s
)
295 tcg_gen_movi_i64(gbea
, s
->pc
);
298 static void update_cc_op(DisasContext
*s
)
300 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
301 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
305 static void potential_page_fault(DisasContext
*s
)
311 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
313 return (uint64_t)cpu_lduw_code(env
, pc
);
316 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
321 static int get_mem_index(DisasContext
*s
)
323 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
324 case PSW_ASC_PRIMARY
>> 32:
326 case PSW_ASC_SECONDARY
>> 32:
328 case PSW_ASC_HOME
>> 32:
336 static void gen_exception(int excp
)
338 TCGv_i32 tmp
= tcg_const_i32(excp
);
339 gen_helper_exception(cpu_env
, tmp
);
340 tcg_temp_free_i32(tmp
);
343 static void gen_program_exception(DisasContext
*s
, int code
)
347 /* Remember what pgm exception this was. */
348 tmp
= tcg_const_i32(code
);
349 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
350 tcg_temp_free_i32(tmp
);
352 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
353 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
354 tcg_temp_free_i32(tmp
);
356 /* Advance past instruction. */
363 /* Trigger exception. */
364 gen_exception(EXCP_PGM
);
367 static inline void gen_illegal_opcode(DisasContext
*s
)
369 gen_program_exception(s
, PGM_OPERATION
);
372 static inline void gen_trap(DisasContext
*s
)
376 /* Set DXC to 0xff. */
377 t
= tcg_temp_new_i32();
378 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
379 tcg_gen_ori_i32(t
, t
, 0xff00);
380 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
381 tcg_temp_free_i32(t
);
383 gen_program_exception(s
, PGM_DATA
);
386 #ifndef CONFIG_USER_ONLY
387 static void check_privileged(DisasContext
*s
)
389 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
390 gen_program_exception(s
, PGM_PRIVILEGED
);
395 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
397 TCGv_i64 tmp
= tcg_temp_new_i64();
398 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
400 /* Note that d2 is limited to 20 bits, signed. If we crop negative
401 displacements early we create larger immediate addends. */
403 /* Note that addi optimizes the imm==0 case. */
405 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
406 tcg_gen_addi_i64(tmp
, tmp
, d2
);
408 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
410 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
416 tcg_gen_movi_i64(tmp
, d2
);
419 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
425 static inline bool live_cc_data(DisasContext
*s
)
427 return (s
->cc_op
!= CC_OP_DYNAMIC
428 && s
->cc_op
!= CC_OP_STATIC
432 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
434 if (live_cc_data(s
)) {
435 tcg_gen_discard_i64(cc_src
);
436 tcg_gen_discard_i64(cc_dst
);
437 tcg_gen_discard_i64(cc_vr
);
439 s
->cc_op
= CC_OP_CONST0
+ val
;
442 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
444 if (live_cc_data(s
)) {
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_discard_i64(cc_vr
);
448 tcg_gen_mov_i64(cc_dst
, dst
);
452 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
455 if (live_cc_data(s
)) {
456 tcg_gen_discard_i64(cc_vr
);
458 tcg_gen_mov_i64(cc_src
, src
);
459 tcg_gen_mov_i64(cc_dst
, dst
);
463 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
464 TCGv_i64 dst
, TCGv_i64 vr
)
466 tcg_gen_mov_i64(cc_src
, src
);
467 tcg_gen_mov_i64(cc_dst
, dst
);
468 tcg_gen_mov_i64(cc_vr
, vr
);
472 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
474 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
477 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
479 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
482 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
484 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
487 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
489 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
492 /* CC value is in env->cc_op */
493 static void set_cc_static(DisasContext
*s
)
495 if (live_cc_data(s
)) {
496 tcg_gen_discard_i64(cc_src
);
497 tcg_gen_discard_i64(cc_dst
);
498 tcg_gen_discard_i64(cc_vr
);
500 s
->cc_op
= CC_OP_STATIC
;
503 /* calculates cc into cc_op */
504 static void gen_op_calc_cc(DisasContext
*s
)
506 TCGv_i32 local_cc_op
;
509 TCGV_UNUSED_I32(local_cc_op
);
510 TCGV_UNUSED_I64(dummy
);
513 dummy
= tcg_const_i64(0);
527 local_cc_op
= tcg_const_i32(s
->cc_op
);
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
547 /* env->cc_op already is the cc value */
562 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
567 case CC_OP_LTUGTU_32
:
568 case CC_OP_LTUGTU_64
:
575 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
590 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
593 /* unknown operation - assume 3 arguments and cc_op in env */
594 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
600 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
601 tcg_temp_free_i32(local_cc_op
);
603 if (!TCGV_IS_UNUSED_I64(dummy
)) {
604 tcg_temp_free_i64(dummy
);
607 /* We now have cc in cc_op as constant */
611 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
613 if (unlikely(s
->singlestep_enabled
) ||
614 (s
->tb
->cflags
& CF_LAST_IO
) ||
615 (s
->tb
->flags
& FLAG_MASK_PER
)) {
618 #ifndef CONFIG_USER_ONLY
619 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
620 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
626 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
628 #ifdef DEBUG_INLINE_BRANCHES
629 inline_branch_miss
[cc_op
]++;
633 static void account_inline_branch(DisasContext
*s
, int cc_op
)
635 #ifdef DEBUG_INLINE_BRANCHES
636 inline_branch_hit
[cc_op
]++;
640 /* Table of mask values to comparison codes, given a comparison as input.
641 For such, CC=3 should not be possible. */
642 static const TCGCond ltgt_cond
[16] = {
643 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
644 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
645 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
646 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
647 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
648 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
649 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
650 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
653 /* Table of mask values to comparison codes, given a logic op as input.
654 For such, only CC=0 and CC=1 should be possible. */
655 static const TCGCond nz_cond
[16] = {
656 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
657 TCG_COND_NEVER
, TCG_COND_NEVER
,
658 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
659 TCG_COND_NE
, TCG_COND_NE
,
660 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
661 TCG_COND_EQ
, TCG_COND_EQ
,
662 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
663 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
666 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
667 details required to generate a TCG comparison. */
668 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
671 enum cc_op old_cc_op
= s
->cc_op
;
673 if (mask
== 15 || mask
== 0) {
674 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
677 c
->g1
= c
->g2
= true;
682 /* Find the TCG condition for the mask + cc op. */
688 cond
= ltgt_cond
[mask
];
689 if (cond
== TCG_COND_NEVER
) {
692 account_inline_branch(s
, old_cc_op
);
695 case CC_OP_LTUGTU_32
:
696 case CC_OP_LTUGTU_64
:
697 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
698 if (cond
== TCG_COND_NEVER
) {
701 account_inline_branch(s
, old_cc_op
);
705 cond
= nz_cond
[mask
];
706 if (cond
== TCG_COND_NEVER
) {
709 account_inline_branch(s
, old_cc_op
);
724 account_inline_branch(s
, old_cc_op
);
739 account_inline_branch(s
, old_cc_op
);
743 switch (mask
& 0xa) {
744 case 8: /* src == 0 -> no one bit found */
747 case 2: /* src != 0 -> one bit found */
753 account_inline_branch(s
, old_cc_op
);
759 case 8 | 2: /* vr == 0 */
762 case 4 | 1: /* vr != 0 */
765 case 8 | 4: /* no carry -> vr >= src */
768 case 2 | 1: /* carry -> vr < src */
774 account_inline_branch(s
, old_cc_op
);
779 /* Note that CC=0 is impossible; treat it as dont-care. */
781 case 2: /* zero -> op1 == op2 */
784 case 4 | 1: /* !zero -> op1 != op2 */
787 case 4: /* borrow (!carry) -> op1 < op2 */
790 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
796 account_inline_branch(s
, old_cc_op
);
801 /* Calculate cc value. */
806 /* Jump based on CC. We'll load up the real cond below;
807 the assignment here merely avoids a compiler warning. */
808 account_noninline_branch(s
, old_cc_op
);
809 old_cc_op
= CC_OP_STATIC
;
810 cond
= TCG_COND_NEVER
;
814 /* Load up the arguments of the comparison. */
816 c
->g1
= c
->g2
= false;
820 c
->u
.s32
.a
= tcg_temp_new_i32();
821 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
822 c
->u
.s32
.b
= tcg_const_i32(0);
825 case CC_OP_LTUGTU_32
:
828 c
->u
.s32
.a
= tcg_temp_new_i32();
829 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
830 c
->u
.s32
.b
= tcg_temp_new_i32();
831 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
838 c
->u
.s64
.b
= tcg_const_i64(0);
842 case CC_OP_LTUGTU_64
:
846 c
->g1
= c
->g2
= true;
852 c
->u
.s64
.a
= tcg_temp_new_i64();
853 c
->u
.s64
.b
= tcg_const_i64(0);
854 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
859 c
->u
.s32
.a
= tcg_temp_new_i32();
860 c
->u
.s32
.b
= tcg_temp_new_i32();
861 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
862 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
863 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
865 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
872 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
873 c
->u
.s64
.b
= tcg_const_i64(0);
885 case 0x8 | 0x4 | 0x2: /* cc != 3 */
887 c
->u
.s32
.b
= tcg_const_i32(3);
889 case 0x8 | 0x4 | 0x1: /* cc != 2 */
891 c
->u
.s32
.b
= tcg_const_i32(2);
893 case 0x8 | 0x2 | 0x1: /* cc != 1 */
895 c
->u
.s32
.b
= tcg_const_i32(1);
897 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
900 c
->u
.s32
.a
= tcg_temp_new_i32();
901 c
->u
.s32
.b
= tcg_const_i32(0);
902 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
904 case 0x8 | 0x4: /* cc < 2 */
906 c
->u
.s32
.b
= tcg_const_i32(2);
908 case 0x8: /* cc == 0 */
910 c
->u
.s32
.b
= tcg_const_i32(0);
912 case 0x4 | 0x2 | 0x1: /* cc != 0 */
914 c
->u
.s32
.b
= tcg_const_i32(0);
916 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
919 c
->u
.s32
.a
= tcg_temp_new_i32();
920 c
->u
.s32
.b
= tcg_const_i32(0);
921 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
923 case 0x4: /* cc == 1 */
925 c
->u
.s32
.b
= tcg_const_i32(1);
927 case 0x2 | 0x1: /* cc > 1 */
929 c
->u
.s32
.b
= tcg_const_i32(1);
931 case 0x2: /* cc == 2 */
933 c
->u
.s32
.b
= tcg_const_i32(2);
935 case 0x1: /* cc == 3 */
937 c
->u
.s32
.b
= tcg_const_i32(3);
940 /* CC is masked by something else: (8 >> cc) & mask. */
943 c
->u
.s32
.a
= tcg_const_i32(8);
944 c
->u
.s32
.b
= tcg_const_i32(0);
945 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
946 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
957 static void free_compare(DisasCompare
*c
)
961 tcg_temp_free_i64(c
->u
.s64
.a
);
963 tcg_temp_free_i32(c
->u
.s32
.a
);
968 tcg_temp_free_i64(c
->u
.s64
.b
);
970 tcg_temp_free_i32(c
->u
.s32
.b
);
975 /* ====================================================================== */
976 /* Define the insn format enumeration. */
977 #define F0(N) FMT_##N,
978 #define F1(N, X1) F0(N)
979 #define F2(N, X1, X2) F0(N)
980 #define F3(N, X1, X2, X3) F0(N)
981 #define F4(N, X1, X2, X3, X4) F0(N)
982 #define F5(N, X1, X2, X3, X4, X5) F0(N)
985 #include "insn-format.def"
995 /* Define a structure to hold the decoded fields. We'll store each inside
996 an array indexed by an enum. In order to conserve memory, we'll arrange
997 for fields that do not exist at the same time to overlap, thus the "C"
998 for compact. For checking purposes there is an "O" for original index
999 as well that will be applied to availability bitmaps. */
1001 enum DisasFieldIndexO
{
1024 enum DisasFieldIndexC
{
1055 struct DisasFields
{
1059 unsigned presentC
:16;
1060 unsigned int presentO
;
1064 /* This is the way fields are to be accessed out of DisasFields. */
1065 #define have_field(S, F) have_field1((S), FLD_O_##F)
1066 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1068 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1070 return (f
->presentO
>> c
) & 1;
1073 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1074 enum DisasFieldIndexC c
)
1076 assert(have_field1(f
, o
));
1080 /* Describe the layout of each field in each format. */
1081 typedef struct DisasField
{
1083 unsigned int size
:8;
1084 unsigned int type
:2;
1085 unsigned int indexC
:6;
1086 enum DisasFieldIndexO indexO
:8;
1089 typedef struct DisasFormatInfo
{
1090 DisasField op
[NUM_C_FIELD
];
1093 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1094 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1095 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1096 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1097 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1098 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1099 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1100 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1102 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1105 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1106 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1108 #define F0(N) { { } },
1109 #define F1(N, X1) { { X1 } },
1110 #define F2(N, X1, X2) { { X1, X2 } },
1111 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1112 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1113 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1115 static const DisasFormatInfo format_info
[] = {
1116 #include "insn-format.def"
1134 /* Generally, we'll extract operands into this structures, operate upon
1135 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1136 of routines below for more details. */
1138 bool g_out
, g_out2
, g_in1
, g_in2
;
1139 TCGv_i64 out
, out2
, in1
, in2
;
1143 /* Instructions can place constraints on their operands, raising specification
1144 exceptions if they are violated. To make this easy to automate, each "in1",
1145 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1146 of the following, or 0. To make this easy to document, we'll put the
1147 SPEC_<name> defines next to <name>. */
1149 #define SPEC_r1_even 1
1150 #define SPEC_r2_even 2
1151 #define SPEC_r3_even 4
1152 #define SPEC_r1_f128 8
1153 #define SPEC_r2_f128 16
1155 /* Return values from translate_one, indicating the state of the TB. */
1157 /* Continue the TB. */
1159 /* We have emitted one or more goto_tb. No fixup required. */
1161 /* We are not using a goto_tb (for whatever reason), but have updated
1162 the PC (for whatever reason), so there's no need to do it again on
1165 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1166 updated the PC for the next instruction to be executed. */
1168 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1169 No following code will be executed. */
1173 typedef enum DisasFacility
{
1174 FAC_Z
, /* zarch (default) */
1175 FAC_CASS
, /* compare and swap and store */
1176 FAC_CASS2
, /* compare and swap and store 2*/
1177 FAC_DFP
, /* decimal floating point */
1178 FAC_DFPR
, /* decimal floating point rounding */
1179 FAC_DO
, /* distinct operands */
1180 FAC_EE
, /* execute extensions */
1181 FAC_EI
, /* extended immediate */
1182 FAC_FPE
, /* floating point extension */
1183 FAC_FPSSH
, /* floating point support sign handling */
1184 FAC_FPRGR
, /* FPR-GR transfer */
1185 FAC_GIE
, /* general instructions extension */
1186 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1187 FAC_HW
, /* high-word */
1188 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1189 FAC_MIE
, /* miscellaneous-instruction-extensions */
1190 FAC_LAT
, /* load-and-trap */
1191 FAC_LOC
, /* load/store on condition */
1192 FAC_LD
, /* long displacement */
1193 FAC_PC
, /* population count */
1194 FAC_SCF
, /* store clock fast */
1195 FAC_SFLE
, /* store facility list extended */
1196 FAC_ILA
, /* interlocked access facility 1 */
1202 DisasFacility fac
:8;
1207 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1208 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1209 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1210 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1211 void (*help_cout
)(DisasContext
*, DisasOps
*);
1212 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1217 /* ====================================================================== */
1218 /* Miscellaneous helpers, used by several operations. */
1220 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1221 DisasOps
*o
, int mask
)
1223 int b2
= get_field(f
, b2
);
1224 int d2
= get_field(f
, d2
);
1227 o
->in2
= tcg_const_i64(d2
& mask
);
1229 o
->in2
= get_address(s
, 0, b2
, d2
);
1230 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1234 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1236 if (dest
== s
->next_pc
) {
1237 per_branch(s
, true);
1240 if (use_goto_tb(s
, dest
)) {
1242 per_breaking_event(s
);
1244 tcg_gen_movi_i64(psw_addr
, dest
);
1245 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1246 return EXIT_GOTO_TB
;
1248 tcg_gen_movi_i64(psw_addr
, dest
);
1249 per_branch(s
, false);
1250 return EXIT_PC_UPDATED
;
1254 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1255 bool is_imm
, int imm
, TCGv_i64 cdest
)
1258 uint64_t dest
= s
->pc
+ 2 * imm
;
1261 /* Take care of the special cases first. */
1262 if (c
->cond
== TCG_COND_NEVER
) {
1267 if (dest
== s
->next_pc
) {
1268 /* Branch to next. */
1269 per_branch(s
, true);
1273 if (c
->cond
== TCG_COND_ALWAYS
) {
1274 ret
= help_goto_direct(s
, dest
);
1278 if (TCGV_IS_UNUSED_I64(cdest
)) {
1279 /* E.g. bcr %r0 -> no branch. */
1283 if (c
->cond
== TCG_COND_ALWAYS
) {
1284 tcg_gen_mov_i64(psw_addr
, cdest
);
1285 per_branch(s
, false);
1286 ret
= EXIT_PC_UPDATED
;
1291 if (use_goto_tb(s
, s
->next_pc
)) {
1292 if (is_imm
&& use_goto_tb(s
, dest
)) {
1293 /* Both exits can use goto_tb. */
1296 lab
= gen_new_label();
1298 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1300 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1303 /* Branch not taken. */
1305 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1306 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1310 per_breaking_event(s
);
1312 tcg_gen_movi_i64(psw_addr
, dest
);
1313 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1317 /* Fallthru can use goto_tb, but taken branch cannot. */
1318 /* Store taken branch destination before the brcond. This
1319 avoids having to allocate a new local temp to hold it.
1320 We'll overwrite this in the not taken case anyway. */
1322 tcg_gen_mov_i64(psw_addr
, cdest
);
1325 lab
= gen_new_label();
1327 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1329 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1332 /* Branch not taken. */
1335 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1336 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1340 tcg_gen_movi_i64(psw_addr
, dest
);
1342 per_breaking_event(s
);
1343 ret
= EXIT_PC_UPDATED
;
1346 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1347 Most commonly we're single-stepping or some other condition that
1348 disables all use of goto_tb. Just update the PC and exit. */
1350 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1352 cdest
= tcg_const_i64(dest
);
1356 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1358 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1360 TCGv_i32 t0
= tcg_temp_new_i32();
1361 TCGv_i64 t1
= tcg_temp_new_i64();
1362 TCGv_i64 z
= tcg_const_i64(0);
1363 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1364 tcg_gen_extu_i32_i64(t1
, t0
);
1365 tcg_temp_free_i32(t0
);
1366 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1367 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1368 tcg_temp_free_i64(t1
);
1369 tcg_temp_free_i64(z
);
1373 tcg_temp_free_i64(cdest
);
1375 tcg_temp_free_i64(next
);
1377 ret
= EXIT_PC_UPDATED
;
1385 /* ====================================================================== */
1386 /* The operations. These perform the bulk of the work for any insn,
1387 usually after the operands have been loaded and output initialized. */
1389 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1392 z
= tcg_const_i64(0);
1393 n
= tcg_temp_new_i64();
1394 tcg_gen_neg_i64(n
, o
->in2
);
1395 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1396 tcg_temp_free_i64(n
);
1397 tcg_temp_free_i64(z
);
1401 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1403 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1407 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1409 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1413 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1415 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1416 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1420 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1422 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1426 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1431 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1433 /* The carry flag is the msb of CC, therefore the branch mask that would
1434 create that comparison is 3. Feeding the generated comparison to
1435 setcond produces the carry flag that we desire. */
1436 disas_jcc(s
, &cmp
, 3);
1437 carry
= tcg_temp_new_i64();
1439 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1441 TCGv_i32 t
= tcg_temp_new_i32();
1442 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1443 tcg_gen_extu_i32_i64(carry
, t
);
1444 tcg_temp_free_i32(t
);
1448 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1449 tcg_temp_free_i64(carry
);
1453 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1455 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1459 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1461 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1465 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1467 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1468 return_low128(o
->out2
);
1472 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1474 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1478 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1480 int shift
= s
->insn
->data
& 0xff;
1481 int size
= s
->insn
->data
>> 8;
1482 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1485 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1486 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1487 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1489 /* Produce the CC from only the bits manipulated. */
1490 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1491 set_cc_nz_u64(s
, cc_dst
);
1495 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1497 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1498 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1499 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1500 per_branch(s
, false);
1501 return EXIT_PC_UPDATED
;
1507 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1509 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1510 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1513 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1515 int m1
= get_field(s
->fields
, m1
);
1516 bool is_imm
= have_field(s
->fields
, i2
);
1517 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1520 disas_jcc(s
, &c
, m1
);
1521 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1524 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1526 int r1
= get_field(s
->fields
, r1
);
1527 bool is_imm
= have_field(s
->fields
, i2
);
1528 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1532 c
.cond
= TCG_COND_NE
;
1537 t
= tcg_temp_new_i64();
1538 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1539 store_reg32_i64(r1
, t
);
1540 c
.u
.s32
.a
= tcg_temp_new_i32();
1541 c
.u
.s32
.b
= tcg_const_i32(0);
1542 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1543 tcg_temp_free_i64(t
);
1545 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
/* BRANCH ON COUNT HIGH: like BCT32 but operates on the high 32 bits of
   R1 (shift down, decrement, store back to the high half).  This form is
   always immediate-relative, hence the constant 1 for is_imm below.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);            /* write back only the high half */
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch while it is
   non-zero.  The comparison uses the global register directly (g1=true),
   so no temporary copy is needed.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXLE/BXH selected by insn->data): add R3 to
   R1, then branch comparing the new index against the limit in the odd
   register of the R3 pair (r3 | 1).  insn->data chooses low-or-equal
   (TCG_COND_LE) versus high (TCG_COND_GT).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit, BXLEG/BXHG selected by insn->data).  When
   R1 aliases the limit register (r1 == r3|1) the limit must be copied
   out before the add clobbers it; otherwise the global register is
   compared directly.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* The add below would overwrite the limit; snapshot it first. */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH/TRAP family (CRJ/CGRJ/CLRJ/... ): compare in1 with
   in2 using the relation encoded in mask M3 (made unsigned when
   insn->data is set), then branch either to the i4 immediate or to the
   b4/d4 address.
   NOTE(review): the is_imm/else structure was partially lost in
   extraction and restored from upstream QEMU -- verify.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* Compare the (global) input registers directly as 64-bit values. */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* COMPARE (BFP short): helper sets cc_op from the float comparison;
   mark the CC as statically computed.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (BFP long).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (BFP extended): the 128-bit first operand is passed as the
   out/out2 register pair.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CONVERT TO FIXED family: convert a BFP value (in2, or in1:in2 for the
   128-bit forms) to a 32/64-bit signed or unsigned integer under the
   rounding mode in mask M3, then set CC from the float source via the
   gen_set_cc_nz_f* helpers.  Naming: c{f,g}{e,d,x}b = signed 32/64 from
   short/long/extended; cl* = the unsigned (logical) variants.  */

/* short BFP -> int32 */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> int32 */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP (in1:in2) -> int32 */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* short BFP -> int64 */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> int64 */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> int64 */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* short BFP -> uint32 */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> uint32 */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> uint32 */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* short BFP -> uint64 */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> uint64 */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> uint64 */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT FROM FIXED family: convert a 64-bit integer (in2) to BFP
   short/long/extended under rounding mode M3.  ce/cd/cx*gb take a
   signed source, ce/cd/cx*lgb the unsigned (logical) one; the extended
   forms return the low half of the 128-bit result via return_low128.
   These conversions do not set the CC.  */

/* int64 -> short BFP */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* int64 -> long BFP */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* int64 -> extended BFP */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* uint64 -> short BFP */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* uint64 -> long BFP */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* uint64 -> extended BFP */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM: helper computes the checksum and the number of bytes
   consumed (len); afterwards advance the R2 address by len and shrink
   the R2+1 length by the same amount, per the architected register
   update.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (CLC): for power-of-two lengths 1/2/4/8 inline the two
   loads and compute the CC with CC_OP_LTUGTU_64; any other length falls
   back to the byte-loop helper, which sets the CC itself.
   NOTE(review): the switch skeleton was lost in extraction and restored
   from upstream QEMU -- verify the case labels against history.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: let the helper do the byte loop. */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED: fully handled by the helper, which
   updates the R1/R3 register pairs and returns the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL CHARACTERS UNDER MASK: compare the bytes of the low
   32 bits of in1 selected by mask M3 against storage at in2; the helper
   produces the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING: helper scans until the terminator in R0 or a
   mismatch; it returns the updated first address (in1) and the second
   via the low-128 mechanism, and sets the CC.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* COPY SIGN: out = (in2 with its sign bit cleared) | (sign bit of in1).
   Pure bit manipulation on the 64-bit FP image; no CC change.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);   /* isolate sign */
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* COMPARE AND SWAP (CS/CSY/CSG, width selected by insn->data):
   non-atomic translation -- load old value, setcond against the expected
   value to form the CC, then unconditionally store either the new value
   (on match) or the just-loaded old value (on mismatch).
   NOTE(review): the is_64 if/else branches around the load/store were
   lost in extraction and restored from upstream QEMU -- verify.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap over the
   R1:R1+1 pair, again non-atomic.  The two-word comparison is folded
   into one CC with xor/or/setcond; both halves are conditionally
   selected with movcond before the unconditional stores, and register
   write-back is deferred until all memory exceptions have passed.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions. */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): helper performs the CAS and
   the TLB purge, returning the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
/* CONVERT TO DECIMAL: helper packs the low 32 bits of in1 into a
   packed-decimal doubleword, which is then stored at the in2 address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* COMPARE AND TRAP: branch past the trap when the INVERTED M3 relation
   holds (i.e. fall through into the trap exactly when the architected
   condition is met); insn->data selects the unsigned comparison.
   NOTE(review): the trap emission and label placement after the brcond
   were lost in extraction and restored from upstream QEMU -- verify.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged hypervisor call): synchronize PSW address and CC
   to the env before the helper, since the helper may inspect or exit
   with full machine state.
   NOTE(review): the update_psw_addr/gen_op_calc_cc lines between
   check_privileged and the helper call were lost in extraction and
   restored from upstream QEMU -- verify.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
/* DIVIDE family: the helpers compute quotient and remainder together
   (detecting divide exceptions); the second result is delivered through
   the low-128 mechanism into out.  */

/* 32-bit signed divide of the in1:in2 pair. */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit unsigned divide. */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 64-bit signed divide. */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64-bit unsigned divide: the dividend is the out:out2 pair. */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE (BFP short): out = in1 / in2. */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (BFP long). */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (BFP extended): 128-bit dividend in out:out2, 128-bit divisor
   in in1:in2; low half of the result comes back via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS REGISTER: read access register R2 from the CPU state. */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
/* EXTRACT CPU ATTRIBUTE (cache geometry): not modeled, return all-ones. */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
/* EXTRACT FPC: read the floating-point control register. */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: high half of psw_mask into R1 and, when R2 is non-zero,
   the low half into R2.
   NOTE(review): the "if (r2 != 0)" guard around the second store was
   lost in extraction and restored from upstream QEMU -- verify.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: run the target instruction (possibly modified by in1) out of
   line via a helper; PSW address and CC must be synchronized to env
   first since the helper re-enters the translator machinery.
   NOTE(review): the tmp declaration and the update_psw_addr /
   gen_op_calc_cc synchronization lines were lost in extraction and
   restored from upstream QEMU -- verify.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */
    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
/* LOAD FP INTEGER family: round a BFP value to an integral value in the
   same format, rounding mode from mask M3.  */

/* short */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* long */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* extended: source in in1:in2, 128-bit result via return_low128 */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE (FLOGR): R1 = leading-zero count (64 if input is 0),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
/* INSERT CHARACTERS UNDER MASK (ICM/ICMH, base position from insn->data):
   contiguous masks become one sized load plus one deposit; any other mask
   becomes a byte-by-byte load/insert loop.  ccm accumulates the mask of
   inserted bits for the CC computation.
   NOTE(review): the switch/while skeleton and the ccm accumulation in the
   loop were lost in extraction and restored from upstream QEMU -- verify
   the case labels and loop bounds against history.  */
static ExitStatus op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* INSERT IMMEDIATE family: deposit in2 into in1 at the bit position and
   width packed into insn->data (low byte = shift, high bits = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK: build bits 32-39 of R1 from the PSW program mask
   (bits 20-23 of psw_mask, extracted with the shl/shr pair) and the
   current CC (materialized via gen_op_calc_cc, shifted into place).
   NOTE(review): the t1 declaration and the gen_op_calc_cc call before
   the first andi were lost in extraction and restored from upstream
   QEMU -- verify.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged): helper invalidates the PTE
   and flushes affected TLB entries.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged): read the storage key for
   the address in in2.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* LOAD LENGTHENED / LOAD ROUNDED (BFP format conversions).  */

/* short -> long */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* long -> short (rounded) */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* extended (in1:in2) -> long (rounded) */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* extended (in1:in2) -> short (rounded) */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* long -> extended; low half via return_low128 */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* short -> extended; low half via return_low128 */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits of in2. */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
/* Memory load primitives shared by many LOAD-type instructions: load
   from the in2 address with the indicated width and extension.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* LOAD AND TRAP family: perform the load/store-to-register, then trap
   (compare-and-trap style program interruption) when the loaded value is
   zero -- the branch skips the trap on non-zero.
   NOTE(review): the gen_trap/gen_set_label tails were lost in extraction
   and restored from upstream QEMU -- verify.  */

/* LAT: 32-bit value already loaded into in2. */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LGAT: 64-bit load from memory. */
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LFHAT: store to the high half of R1. */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LLGFAT: zero-extended 32-bit load. */
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LLGTAT: 31-bit masked value. */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD ON CONDITION: select in2 (condition true) or in1 (false) into out
   based on mask M3.  A 64-bit comparison maps directly onto movcond; a
   32-bit one is first materialized with setcond, widened, and then fed
   to a movcond against zero.
   NOTE(review): the is_64 if/else skeleton and free_compare calls were
   lost in extraction and restored from upstream QEMU -- verify.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): helper loads control registers
   R1..R3 from the in2 address.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit, privileged).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS (privileged): translate the virtual address in in2;
   helper returns the real address and sets the CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* LOAD PSW (privileged, ESA 64-bit image from two 32-bit words): read
   mask and address words, widen the short mask into the 64-bit PSW mask
   layout, and hand both to the load_psw helper.  Ends the TB since the
   full machine state may have changed.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
/* LOAD PSW EXTENDED (privileged): load the full 16-byte PSW (two
   doublewords) and install it via the load_psw helper; ends the TB.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
/* LOAD ACCESS MULTIPLE: helper loads access registers R1..R3 from the
   in2 address.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD MULTIPLE (32-bit): load registers R1..R3 (wrapping mod 16) from
   consecutive words.  The first and last registers are loaded first so
   any page fault happens before architectural state is modified; the
   middle registers then cannot fault.
   NOTE(review): the early-return bodies and the remaining-register loop
   were lost in extraction and restored from upstream QEMU -- verify.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE HIGH: identical structure to op_lm32, but the loaded
   words go to the HIGH halves of the registers (store_reg32h_i64).
   NOTE(review): skeleton restored from upstream QEMU -- verify.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE (64-bit, LMG): same first/last-first fault ordering as
   op_lm32, but full doublewords are loaded directly into the global
   register array.
   NOTE(review): skeleton restored from upstream QEMU -- verify.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit, privileged): bypass DAT and read
   from the real address in in2.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit, privileged).  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* Generic register move: steal in2 as the output (transferring its
   "global" flag) instead of emitting a copy.
   NOTE(review): the "o->out = o->in2;" line ahead of the visible g_out
   assignment was lost in extraction and restored from upstream QEMU.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
/* MVCOS-style move with access-register 1 setup (LPTEA et al.): perform
   the op_mov2 output steal, then write the ALET implied by the current
   address-space control into access register 1.
   NOTE(review): the case-ending breaks and the if/else around b2 in the
   SECONDARY case were lost in extraction and restored from upstream
   QEMU -- verify.  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
/* 128-bit register-pair move: steal both inputs as the output pair,
   transferring the global flags, instead of copying.
   NOTE(review): the leading "o->out = o->in1; o->out2 = o->in2;" lines
   were lost in extraction and restored from upstream QEMU.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE CHARACTERS (MVC): helper copies l1+1 bytes from in2 to addr1. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MOVE LONG (MVCL): helper moves between the R1 and R2 register pairs,
   updating them and producing the CC.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE LONG EXTENDED (MVCLE): like MVCL but with the padding byte taken
   from in2; helper sets the CC.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2860 #ifndef CONFIG_USER_ONLY
2861 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2863 int r1
= get_field(s
->fields
, l1
);
2864 check_privileged(s
);
2865 potential_page_fault(s
);
2866 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2871 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2873 int r1
= get_field(s
->fields
, l1
);
2874 check_privileged(s
);
2875 potential_page_fault(s
);
2876 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2882 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2884 potential_page_fault(s
);
2885 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2890 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2892 potential_page_fault(s
);
2893 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2895 return_low128(o
->in2
);
2899 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2901 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2905 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2907 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2911 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2913 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2917 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2919 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2923 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2925 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2929 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2931 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2932 return_low128(o
->out2
);
2936 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2938 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2939 return_low128(o
->out2
);
2943 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2945 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2946 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2947 tcg_temp_free_i64(r3
);
2951 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2953 int r3
= get_field(s
->fields
, r3
);
2954 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2958 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2960 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2961 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2962 tcg_temp_free_i64(r3
);
2966 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2968 int r3
= get_field(s
->fields
, r3
);
2969 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2973 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2976 z
= tcg_const_i64(0);
2977 n
= tcg_temp_new_i64();
2978 tcg_gen_neg_i64(n
, o
->in2
);
2979 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
2980 tcg_temp_free_i64(n
);
2981 tcg_temp_free_i64(z
);
2985 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2987 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2991 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2993 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2997 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2999 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3000 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3004 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3006 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3007 potential_page_fault(s
);
3008 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3009 tcg_temp_free_i32(l
);
3014 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3016 tcg_gen_neg_i64(o
->out
, o
->in2
);
3020 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3022 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3026 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3028 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3032 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3034 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3035 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3039 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3041 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3042 potential_page_fault(s
);
3043 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3044 tcg_temp_free_i32(l
);
3049 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3051 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3055 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3057 int shift
= s
->insn
->data
& 0xff;
3058 int size
= s
->insn
->data
>> 8;
3059 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3062 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3063 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3065 /* Produce the CC from only the bits manipulated. */
3066 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3067 set_cc_nz_u64(s
, cc_dst
);
3071 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3073 gen_helper_popcnt(o
->out
, o
->in2
);
3077 #ifndef CONFIG_USER_ONLY
3078 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3080 check_privileged(s
);
3081 gen_helper_ptlb(cpu_env
);
3086 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3088 int i3
= get_field(s
->fields
, i3
);
3089 int i4
= get_field(s
->fields
, i4
);
3090 int i5
= get_field(s
->fields
, i5
);
3091 int do_zero
= i4
& 0x80;
3092 uint64_t mask
, imask
, pmask
;
3095 /* Adjust the arguments for the specific insn. */
3096 switch (s
->fields
->op2
) {
3097 case 0x55: /* risbg */
3102 case 0x5d: /* risbhg */
3105 pmask
= 0xffffffff00000000ull
;
3107 case 0x51: /* risblg */
3110 pmask
= 0x00000000ffffffffull
;
3116 /* MASK is the set of bits to be inserted from R2.
3117 Take care for I3/I4 wraparound. */
3120 mask
^= pmask
>> i4
>> 1;
3122 mask
|= ~(pmask
>> i4
>> 1);
3126 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3127 insns, we need to keep the other half of the register. */
3128 imask
= ~mask
| ~pmask
;
3130 if (s
->fields
->op2
== 0x55) {
3137 /* In some cases we can implement this with deposit, which can be more
3138 efficient on some hosts. */
3139 if (~mask
== imask
&& i3
<= i4
) {
3140 if (s
->fields
->op2
== 0x5d) {
3143 /* Note that we rotate the bits to be inserted to the lsb, not to
3144 the position as described in the PoO. */
3147 rot
= (i5
- pos
) & 63;
3153 /* Rotate the input as necessary. */
3154 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3156 /* Insert the selected bits into the output. */
3158 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3159 } else if (imask
== 0) {
3160 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3162 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3163 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3164 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3169 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3171 int i3
= get_field(s
->fields
, i3
);
3172 int i4
= get_field(s
->fields
, i4
);
3173 int i5
= get_field(s
->fields
, i5
);
3176 /* If this is a test-only form, arrange to discard the result. */
3178 o
->out
= tcg_temp_new_i64();
3186 /* MASK is the set of bits to be operated on from R2.
3187 Take care for I3/I4 wraparound. */
3190 mask
^= ~0ull >> i4
>> 1;
3192 mask
|= ~(~0ull >> i4
>> 1);
3195 /* Rotate the input as necessary. */
3196 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3199 switch (s
->fields
->op2
) {
3200 case 0x55: /* AND */
3201 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3202 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3205 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3206 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3208 case 0x57: /* XOR */
3209 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3210 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3217 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3218 set_cc_nz_u64(s
, cc_dst
);
3222 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3224 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3228 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3230 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3234 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3236 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3240 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3242 TCGv_i32 t1
= tcg_temp_new_i32();
3243 TCGv_i32 t2
= tcg_temp_new_i32();
3244 TCGv_i32 to
= tcg_temp_new_i32();
3245 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3246 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3247 tcg_gen_rotl_i32(to
, t1
, t2
);
3248 tcg_gen_extu_i32_i64(o
->out
, to
);
3249 tcg_temp_free_i32(t1
);
3250 tcg_temp_free_i32(t2
);
3251 tcg_temp_free_i32(to
);
3255 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3257 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3261 #ifndef CONFIG_USER_ONLY
3262 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3264 check_privileged(s
);
3265 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3270 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3272 check_privileged(s
);
3273 gen_helper_sacf(cpu_env
, o
->in2
);
3274 /* Addressing mode has changed, so end the block. */
3275 return EXIT_PC_STALE
;
3279 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3281 int sam
= s
->insn
->data
;
3297 /* Bizarre but true, we check the address of the current insn for the
3298 specification exception, not the next to be executed. Thus the PoO
3299 documents that Bad Things Happen two bytes before the end. */
3300 if (s
->pc
& ~mask
) {
3301 gen_program_exception(s
, PGM_SPECIFICATION
);
3302 return EXIT_NORETURN
;
3306 tsam
= tcg_const_i64(sam
);
3307 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3308 tcg_temp_free_i64(tsam
);
3310 /* Always exit the TB, since we (may have) changed execution mode. */
3311 return EXIT_PC_STALE
;
3314 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3316 int r1
= get_field(s
->fields
, r1
);
3317 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3321 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3323 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3327 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3329 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3333 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3335 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3336 return_low128(o
->out2
);
3340 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3342 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3346 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3348 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3352 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3354 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3355 return_low128(o
->out2
);
3359 #ifndef CONFIG_USER_ONLY
3360 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3362 check_privileged(s
);
3363 potential_page_fault(s
);
3364 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3369 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3371 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3372 check_privileged(s
);
3373 potential_page_fault(s
);
3374 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3375 tcg_temp_free_i32(r1
);
3380 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3387 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3389 /* We want to store when the condition is fulfilled, so branch
3390 out when it's not */
3391 c
.cond
= tcg_invert_cond(c
.cond
);
3393 lab
= gen_new_label();
3395 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3397 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3401 r1
= get_field(s
->fields
, r1
);
3402 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3403 if (s
->insn
->data
) {
3404 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3406 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3408 tcg_temp_free_i64(a
);
3414 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3416 uint64_t sign
= 1ull << s
->insn
->data
;
3417 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3418 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3419 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3420 /* The arithmetic left shift is curious in that it does not affect
3421 the sign bit. Copy that over from the source unchanged. */
3422 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3423 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3424 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3428 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3430 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3434 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3436 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3440 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3442 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3446 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3448 gen_helper_sfpc(cpu_env
, o
->in2
);
3452 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3454 gen_helper_sfas(cpu_env
, o
->in2
);
3458 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3460 int b2
= get_field(s
->fields
, b2
);
3461 int d2
= get_field(s
->fields
, d2
);
3462 TCGv_i64 t1
= tcg_temp_new_i64();
3463 TCGv_i64 t2
= tcg_temp_new_i64();
3466 switch (s
->fields
->op2
) {
3467 case 0x99: /* SRNM */
3470 case 0xb8: /* SRNMB */
3473 case 0xb9: /* SRNMT */
3479 mask
= (1 << len
) - 1;
3481 /* Insert the value into the appropriate field of the FPC. */
3483 tcg_gen_movi_i64(t1
, d2
& mask
);
3485 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3486 tcg_gen_andi_i64(t1
, t1
, mask
);
3488 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3489 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3490 tcg_temp_free_i64(t1
);
3492 /* Then install the new FPC to set the rounding mode in fpu_status. */
3493 gen_helper_sfpc(cpu_env
, t2
);
3494 tcg_temp_free_i64(t2
);
3498 #ifndef CONFIG_USER_ONLY
3499 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3501 check_privileged(s
);
3502 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3503 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3507 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3509 check_privileged(s
);
3510 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3514 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3516 check_privileged(s
);
3517 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3521 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3523 check_privileged(s
);
3524 /* ??? Surely cpu address != cpu number. In any case the previous
3525 version of this stored more than the required half-word, so it
3526 is unlikely this has ever been tested. */
3527 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3531 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3533 gen_helper_stck(o
->out
, cpu_env
);
3534 /* ??? We don't implement clock states. */
3535 gen_op_movi_cc(s
, 0);
3539 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3541 TCGv_i64 c1
= tcg_temp_new_i64();
3542 TCGv_i64 c2
= tcg_temp_new_i64();
3543 gen_helper_stck(c1
, cpu_env
);
3544 /* Shift the 64-bit value into its place as a zero-extended
3545 104-bit value. Note that "bit positions 64-103 are always
3546 non-zero so that they compare differently to STCK"; we set
3547 the least significant bit to 1. */
3548 tcg_gen_shli_i64(c2
, c1
, 56);
3549 tcg_gen_shri_i64(c1
, c1
, 8);
3550 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3551 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3552 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3553 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3554 tcg_temp_free_i64(c1
);
3555 tcg_temp_free_i64(c2
);
3556 /* ??? We don't implement clock states. */
3557 gen_op_movi_cc(s
, 0);
3561 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3563 check_privileged(s
);
3564 gen_helper_sckc(cpu_env
, o
->in2
);
3568 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3570 check_privileged(s
);
3571 gen_helper_stckc(o
->out
, cpu_env
);
3575 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3577 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3578 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3579 check_privileged(s
);
3580 potential_page_fault(s
);
3581 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3582 tcg_temp_free_i32(r1
);
3583 tcg_temp_free_i32(r3
);
3587 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3589 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3590 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3591 check_privileged(s
);
3592 potential_page_fault(s
);
3593 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3594 tcg_temp_free_i32(r1
);
3595 tcg_temp_free_i32(r3
);
3599 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3601 TCGv_i64 t1
= tcg_temp_new_i64();
3603 check_privileged(s
);
3604 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3605 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3606 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3607 tcg_temp_free_i64(t1
);
3612 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3614 check_privileged(s
);
3615 gen_helper_spt(cpu_env
, o
->in2
);
3619 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3622 /* We really ought to have more complete indication of facilities
3623 that we implement. Address this when STFLE is implemented. */
3624 check_privileged(s
);
3625 f
= tcg_const_i64(0xc0000000);
3626 a
= tcg_const_i64(200);
3627 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3628 tcg_temp_free_i64(f
);
3629 tcg_temp_free_i64(a
);
3633 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3635 check_privileged(s
);
3636 gen_helper_stpt(o
->out
, cpu_env
);
3640 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3642 check_privileged(s
);
3643 potential_page_fault(s
);
3644 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3649 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3651 check_privileged(s
);
3652 gen_helper_spx(cpu_env
, o
->in2
);
3656 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3658 check_privileged(s
);
3659 potential_page_fault(s
);
3660 gen_helper_xsch(cpu_env
, regs
[1]);
3665 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3667 check_privileged(s
);
3668 potential_page_fault(s
);
3669 gen_helper_csch(cpu_env
, regs
[1]);
3674 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3676 check_privileged(s
);
3677 potential_page_fault(s
);
3678 gen_helper_hsch(cpu_env
, regs
[1]);
3683 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3685 check_privileged(s
);
3686 potential_page_fault(s
);
3687 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3692 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3694 check_privileged(s
);
3695 potential_page_fault(s
);
3696 gen_helper_rchp(cpu_env
, regs
[1]);
3701 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3703 check_privileged(s
);
3704 potential_page_fault(s
);
3705 gen_helper_rsch(cpu_env
, regs
[1]);
3710 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3712 check_privileged(s
);
3713 potential_page_fault(s
);
3714 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
3719 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
3721 check_privileged(s
);
3722 potential_page_fault(s
);
3723 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
3728 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
3730 check_privileged(s
);
3731 potential_page_fault(s
);
3732 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
3737 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
3739 check_privileged(s
);
3740 potential_page_fault(s
);
3741 gen_helper_chsc(cpu_env
, o
->in2
);
3746 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3748 check_privileged(s
);
3749 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3750 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3754 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3756 uint64_t i2
= get_field(s
->fields
, i2
);
3759 check_privileged(s
);
3761 /* It is important to do what the instruction name says: STORE THEN.
3762 If we let the output hook perform the store then if we fault and
3763 restart, we'll have the wrong SYSTEM MASK in place. */
3764 t
= tcg_temp_new_i64();
3765 tcg_gen_shri_i64(t
, psw_mask
, 56);
3766 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3767 tcg_temp_free_i64(t
);
3769 if (s
->fields
->op
== 0xac) {
3770 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3771 (i2
<< 56) | 0x00ffffffffffffffull
);
3773 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3778 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3780 check_privileged(s
);
3781 potential_page_fault(s
);
3782 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3786 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3788 check_privileged(s
);
3789 potential_page_fault(s
);
3790 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3795 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3797 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3801 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3803 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3807 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3809 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3813 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3815 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3819 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3821 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3822 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3823 potential_page_fault(s
);
3824 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3825 tcg_temp_free_i32(r1
);
3826 tcg_temp_free_i32(r3
);
3830 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3832 int m3
= get_field(s
->fields
, m3
);
3833 int pos
, base
= s
->insn
->data
;
3834 TCGv_i64 tmp
= tcg_temp_new_i64();
3836 pos
= base
+ ctz32(m3
) * 8;
3839 /* Effectively a 32-bit store. */
3840 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3841 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3847 /* Effectively a 16-bit store. */
3848 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3849 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3856 /* Effectively an 8-bit store. */
3857 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3858 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3862 /* This is going to be a sequence of shifts and stores. */
3863 pos
= base
+ 32 - 8;
3866 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3867 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3868 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3870 m3
= (m3
<< 1) & 0xf;
3875 tcg_temp_free_i64(tmp
);
3879 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3881 int r1
= get_field(s
->fields
, r1
);
3882 int r3
= get_field(s
->fields
, r3
);
3883 int size
= s
->insn
->data
;
3884 TCGv_i64 tsize
= tcg_const_i64(size
);
3888 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3890 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3895 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3899 tcg_temp_free_i64(tsize
);
3903 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3905 int r1
= get_field(s
->fields
, r1
);
3906 int r3
= get_field(s
->fields
, r3
);
3907 TCGv_i64 t
= tcg_temp_new_i64();
3908 TCGv_i64 t4
= tcg_const_i64(4);
3909 TCGv_i64 t32
= tcg_const_i64(32);
3912 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3913 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3917 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3921 tcg_temp_free_i64(t
);
3922 tcg_temp_free_i64(t4
);
3923 tcg_temp_free_i64(t32
);
3927 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3929 potential_page_fault(s
);
3930 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3932 return_low128(o
->in2
);
3936 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3938 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3942 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3947 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3949 /* The !borrow flag is the msb of CC. Since we want the inverse of
3950 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3951 disas_jcc(s
, &cmp
, 8 | 4);
3952 borrow
= tcg_temp_new_i64();
3954 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3956 TCGv_i32 t
= tcg_temp_new_i32();
3957 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3958 tcg_gen_extu_i32_i64(borrow
, t
);
3959 tcg_temp_free_i32(t
);
3963 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3964 tcg_temp_free_i64(borrow
);
3968 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3975 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3976 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3977 tcg_temp_free_i32(t
);
3979 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3980 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3981 tcg_temp_free_i32(t
);
3983 gen_exception(EXCP_SVC
);
3984 return EXIT_NORETURN
;
3987 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3989 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
3994 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3996 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4001 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4003 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4008 #ifndef CONFIG_USER_ONLY
4009 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4011 potential_page_fault(s
);
4012 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4018 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4020 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4021 potential_page_fault(s
);
4022 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4023 tcg_temp_free_i32(l
);
4028 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4030 potential_page_fault(s
);
4031 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4032 return_low128(o
->out2
);
4037 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4039 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4040 potential_page_fault(s
);
4041 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4042 tcg_temp_free_i32(l
);
4047 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4049 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4050 potential_page_fault(s
);
4051 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4052 tcg_temp_free_i32(l
);
4056 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4058 int d1
= get_field(s
->fields
, d1
);
4059 int d2
= get_field(s
->fields
, d2
);
4060 int b1
= get_field(s
->fields
, b1
);
4061 int b2
= get_field(s
->fields
, b2
);
4062 int l
= get_field(s
->fields
, l1
);
4065 o
->addr1
= get_address(s
, 0, b1
, d1
);
4067 /* If the addresses are identical, this is a store/memset of zero. */
4068 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4069 o
->in2
= tcg_const_i64(0);
4073 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4076 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4080 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4083 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4087 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4090 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4094 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4096 gen_op_movi_cc(s
, 0);
4100 /* But in general we'll defer to a helper. */
4101 o
->in2
= get_address(s
, 0, b2
, d2
);
4102 t32
= tcg_const_i32(l
);
4103 potential_page_fault(s
);
4104 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4105 tcg_temp_free_i32(t32
);
4110 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4112 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4116 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4118 int shift
= s
->insn
->data
& 0xff;
4119 int size
= s
->insn
->data
>> 8;
4120 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4123 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4124 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4126 /* Produce the CC from only the bits manipulated. */
4127 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4128 set_cc_nz_u64(s
, cc_dst
);
4132 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4134 o
->out
= tcg_const_i64(0);
4138 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4140 o
->out
= tcg_const_i64(0);
4146 /* ====================================================================== */
4147 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4148 the original inputs), update the various cc data structures in order to
4149 be able to compute the new condition code. */
4151 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4153 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4156 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4158 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4161 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4163 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4166 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4168 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4171 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4173 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4176 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4178 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4181 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4183 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4186 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4188 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4191 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4193 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4196 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4198 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4201 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4203 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4206 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4208 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4211 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4213 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4216 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4218 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4221 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4223 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4226 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4228 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4231 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4233 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4236 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4238 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4241 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4243 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4246 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4248 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4249 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4252 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4254 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4257 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4259 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4262 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4264 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4267 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4269 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4272 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4274 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4277 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4279 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4282 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4284 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4287 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4289 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4292 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4294 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4297 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4299 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4302 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4304 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4307 /* ====================================================================== */
4308 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4309 with the TCG register to which we will write. Used in combination with
4310 the "wout" generators, in some cases we need a new temporary, and in
4311 some cases we can write to a TCG global. */
4313 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4315 o
->out
= tcg_temp_new_i64();
4317 #define SPEC_prep_new 0
4319 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4321 o
->out
= tcg_temp_new_i64();
4322 o
->out2
= tcg_temp_new_i64();
4324 #define SPEC_prep_new_P 0
4326 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4328 o
->out
= regs
[get_field(f
, r1
)];
4331 #define SPEC_prep_r1 0
4333 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4335 int r1
= get_field(f
, r1
);
4337 o
->out2
= regs
[r1
+ 1];
4338 o
->g_out
= o
->g_out2
= true;
4340 #define SPEC_prep_r1_P SPEC_r1_even
4342 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4344 o
->out
= fregs
[get_field(f
, r1
)];
4347 #define SPEC_prep_f1 0
4349 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4351 int r1
= get_field(f
, r1
);
4353 o
->out2
= fregs
[r1
+ 2];
4354 o
->g_out
= o
->g_out2
= true;
4356 #define SPEC_prep_x1 SPEC_r1_f128
4358 /* ====================================================================== */
4359 /* The "Write OUTput" generators. These generally perform some non-trivial
4360 copy of data to TCG globals, or to main memory. The trivial cases are
4361 generally handled by having a "prep" generator install the TCG global
4362 as the destination of the operation. */
4364 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4366 store_reg(get_field(f
, r1
), o
->out
);
4368 #define SPEC_wout_r1 0
4370 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4372 int r1
= get_field(f
, r1
);
4373 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4375 #define SPEC_wout_r1_8 0
4377 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4379 int r1
= get_field(f
, r1
);
4380 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4382 #define SPEC_wout_r1_16 0
4384 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4386 store_reg32_i64(get_field(f
, r1
), o
->out
);
4388 #define SPEC_wout_r1_32 0
4390 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4392 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4394 #define SPEC_wout_r1_32h 0
4396 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4398 int r1
= get_field(f
, r1
);
4399 store_reg32_i64(r1
, o
->out
);
4400 store_reg32_i64(r1
+ 1, o
->out2
);
4402 #define SPEC_wout_r1_P32 SPEC_r1_even
4404 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4406 int r1
= get_field(f
, r1
);
4407 store_reg32_i64(r1
+ 1, o
->out
);
4408 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4409 store_reg32_i64(r1
, o
->out
);
4411 #define SPEC_wout_r1_D32 SPEC_r1_even
4413 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4415 store_freg32_i64(get_field(f
, r1
), o
->out
);
4417 #define SPEC_wout_e1 0
4419 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4421 store_freg(get_field(f
, r1
), o
->out
);
4423 #define SPEC_wout_f1 0
4425 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4427 int f1
= get_field(s
->fields
, r1
);
4428 store_freg(f1
, o
->out
);
4429 store_freg(f1
+ 2, o
->out2
);
4431 #define SPEC_wout_x1 SPEC_r1_f128
4433 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4435 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4436 store_reg32_i64(get_field(f
, r1
), o
->out
);
4439 #define SPEC_wout_cond_r1r2_32 0
4441 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4443 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4444 store_freg32_i64(get_field(f
, r1
), o
->out
);
4447 #define SPEC_wout_cond_e1e2 0
4449 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4451 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4453 #define SPEC_wout_m1_8 0
4455 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4457 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4459 #define SPEC_wout_m1_16 0
4461 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4463 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4465 #define SPEC_wout_m1_32 0
4467 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4469 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4471 #define SPEC_wout_m1_64 0
4473 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4475 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4477 #define SPEC_wout_m2_32 0
4479 static void wout_m2_32_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4481 /* XXX release reservation */
4482 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4483 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4485 #define SPEC_wout_m2_32_r1_atomic 0
4487 static void wout_m2_64_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4489 /* XXX release reservation */
4490 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4491 store_reg(get_field(f
, r1
), o
->in2
);
4493 #define SPEC_wout_m2_64_r1_atomic 0
4495 /* ====================================================================== */
4496 /* The "INput 1" generators. These load the first operand to an insn. */
4498 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4500 o
->in1
= load_reg(get_field(f
, r1
));
4502 #define SPEC_in1_r1 0
4504 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4506 o
->in1
= regs
[get_field(f
, r1
)];
4509 #define SPEC_in1_r1_o 0
4511 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4513 o
->in1
= tcg_temp_new_i64();
4514 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4516 #define SPEC_in1_r1_32s 0
4518 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4520 o
->in1
= tcg_temp_new_i64();
4521 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4523 #define SPEC_in1_r1_32u 0
4525 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4527 o
->in1
= tcg_temp_new_i64();
4528 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4530 #define SPEC_in1_r1_sr32 0
4532 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4534 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4536 #define SPEC_in1_r1p1 SPEC_r1_even
4538 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4540 o
->in1
= tcg_temp_new_i64();
4541 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4543 #define SPEC_in1_r1p1_32s SPEC_r1_even
4545 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4547 o
->in1
= tcg_temp_new_i64();
4548 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4550 #define SPEC_in1_r1p1_32u SPEC_r1_even
4552 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4554 int r1
= get_field(f
, r1
);
4555 o
->in1
= tcg_temp_new_i64();
4556 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4558 #define SPEC_in1_r1_D32 SPEC_r1_even
4560 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4562 o
->in1
= load_reg(get_field(f
, r2
));
4564 #define SPEC_in1_r2 0
4566 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4568 o
->in1
= tcg_temp_new_i64();
4569 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4571 #define SPEC_in1_r2_sr32 0
4573 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4575 o
->in1
= load_reg(get_field(f
, r3
));
4577 #define SPEC_in1_r3 0
4579 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4581 o
->in1
= regs
[get_field(f
, r3
)];
4584 #define SPEC_in1_r3_o 0
4586 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4588 o
->in1
= tcg_temp_new_i64();
4589 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4591 #define SPEC_in1_r3_32s 0
4593 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4595 o
->in1
= tcg_temp_new_i64();
4596 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4598 #define SPEC_in1_r3_32u 0
4600 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4602 int r3
= get_field(f
, r3
);
4603 o
->in1
= tcg_temp_new_i64();
4604 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4606 #define SPEC_in1_r3_D32 SPEC_r3_even
4608 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4610 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4612 #define SPEC_in1_e1 0
4614 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4616 o
->in1
= fregs
[get_field(f
, r1
)];
4619 #define SPEC_in1_f1_o 0
4621 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4623 int r1
= get_field(f
, r1
);
4625 o
->out2
= fregs
[r1
+ 2];
4626 o
->g_out
= o
->g_out2
= true;
4628 #define SPEC_in1_x1_o SPEC_r1_f128
4630 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4632 o
->in1
= fregs
[get_field(f
, r3
)];
4635 #define SPEC_in1_f3_o 0
4637 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4639 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4641 #define SPEC_in1_la1 0
4643 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4645 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4646 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4648 #define SPEC_in1_la2 0
4650 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4653 o
->in1
= tcg_temp_new_i64();
4654 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4656 #define SPEC_in1_m1_8u 0
4658 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4661 o
->in1
= tcg_temp_new_i64();
4662 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4664 #define SPEC_in1_m1_16s 0
4666 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4669 o
->in1
= tcg_temp_new_i64();
4670 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4672 #define SPEC_in1_m1_16u 0
4674 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4677 o
->in1
= tcg_temp_new_i64();
4678 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4680 #define SPEC_in1_m1_32s 0
4682 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4685 o
->in1
= tcg_temp_new_i64();
4686 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4688 #define SPEC_in1_m1_32u 0
4690 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4693 o
->in1
= tcg_temp_new_i64();
4694 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4696 #define SPEC_in1_m1_64 0
4698 /* ====================================================================== */
4699 /* The "INput 2" generators. These load the second operand to an insn. */
4701 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4703 o
->in2
= regs
[get_field(f
, r1
)];
4706 #define SPEC_in2_r1_o 0
4708 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4710 o
->in2
= tcg_temp_new_i64();
4711 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4713 #define SPEC_in2_r1_16u 0
4715 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4717 o
->in2
= tcg_temp_new_i64();
4718 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4720 #define SPEC_in2_r1_32u 0
4722 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4724 int r1
= get_field(f
, r1
);
4725 o
->in2
= tcg_temp_new_i64();
4726 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4728 #define SPEC_in2_r1_D32 SPEC_r1_even
4730 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4732 o
->in2
= load_reg(get_field(f
, r2
));
4734 #define SPEC_in2_r2 0
4736 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4738 o
->in2
= regs
[get_field(f
, r2
)];
4741 #define SPEC_in2_r2_o 0
4743 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4745 int r2
= get_field(f
, r2
);
4747 o
->in2
= load_reg(r2
);
4750 #define SPEC_in2_r2_nz 0
4752 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4754 o
->in2
= tcg_temp_new_i64();
4755 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4757 #define SPEC_in2_r2_8s 0
4759 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4761 o
->in2
= tcg_temp_new_i64();
4762 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4764 #define SPEC_in2_r2_8u 0
4766 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4768 o
->in2
= tcg_temp_new_i64();
4769 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4771 #define SPEC_in2_r2_16s 0
4773 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4775 o
->in2
= tcg_temp_new_i64();
4776 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4778 #define SPEC_in2_r2_16u 0
4780 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4782 o
->in2
= load_reg(get_field(f
, r3
));
4784 #define SPEC_in2_r3 0
4786 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4788 o
->in2
= tcg_temp_new_i64();
4789 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
4791 #define SPEC_in2_r3_sr32 0
4793 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4795 o
->in2
= tcg_temp_new_i64();
4796 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4798 #define SPEC_in2_r2_32s 0
4800 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4802 o
->in2
= tcg_temp_new_i64();
4803 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4805 #define SPEC_in2_r2_32u 0
4807 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4809 o
->in2
= tcg_temp_new_i64();
4810 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
4812 #define SPEC_in2_r2_sr32 0
4814 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4816 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4818 #define SPEC_in2_e2 0
4820 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4822 o
->in2
= fregs
[get_field(f
, r2
)];
4825 #define SPEC_in2_f2_o 0
4827 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4829 int r2
= get_field(f
, r2
);
4831 o
->in2
= fregs
[r2
+ 2];
4832 o
->g_in1
= o
->g_in2
= true;
4834 #define SPEC_in2_x2_o SPEC_r2_f128
4836 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4838 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4840 #define SPEC_in2_ra2 0
4842 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4844 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4845 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4847 #define SPEC_in2_a2 0
4849 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4851 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4853 #define SPEC_in2_ri2 0
4855 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4857 help_l2_shift(s
, f
, o
, 31);
4859 #define SPEC_in2_sh32 0
4861 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4863 help_l2_shift(s
, f
, o
, 63);
4865 #define SPEC_in2_sh64 0
4867 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4870 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4872 #define SPEC_in2_m2_8u 0
4874 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4877 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4879 #define SPEC_in2_m2_16s 0
4881 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4884 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4886 #define SPEC_in2_m2_16u 0
4888 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4891 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4893 #define SPEC_in2_m2_32s 0
4895 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4898 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4900 #define SPEC_in2_m2_32u 0
4902 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4905 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4907 #define SPEC_in2_m2_64 0
4909 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4912 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4914 #define SPEC_in2_mri2_16u 0
4916 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4919 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4921 #define SPEC_in2_mri2_32s 0
4923 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4926 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4928 #define SPEC_in2_mri2_32u 0
4930 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4933 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4935 #define SPEC_in2_mri2_64 0
4937 static void in2_m2_32s_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4939 /* XXX should reserve the address */
4941 o
->in2
= tcg_temp_new_i64();
4942 tcg_gen_qemu_ld32s(o
->in2
, o
->addr1
, get_mem_index(s
));
4944 #define SPEC_in2_m2_32s_atomic 0
4946 static void in2_m2_64_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4948 /* XXX should reserve the address */
4950 o
->in2
= tcg_temp_new_i64();
4951 tcg_gen_qemu_ld64(o
->in2
, o
->addr1
, get_mem_index(s
));
4953 #define SPEC_in2_m2_64_atomic 0
4955 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4957 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4959 #define SPEC_in2_i2 0
4961 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4963 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4965 #define SPEC_in2_i2_8u 0
4967 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4969 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4971 #define SPEC_in2_i2_16u 0
4973 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4975 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4977 #define SPEC_in2_i2_32u 0
4979 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4981 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4982 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4984 #define SPEC_in2_i2_16u_shl 0
4986 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4988 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4989 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4991 #define SPEC_in2_i2_32u_shl 0
4993 #ifndef CONFIG_USER_ONLY
4994 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4996 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
4998 #define SPEC_in2_insn 0
5001 /* ====================================================================== */
5003 /* Find opc within the table of insns. This is formulated as a switch
5004 statement so that (1) we get compile-time notice of cut-paste errors
5005 for duplicated opcodes, and (2) the compiler generates the binary
5006 search tree, rather than us having to post-process the table. */
5008 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5009 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5011 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5013 enum DisasInsnEnum
{
5014 #include "insn-data.def"
5018 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5022 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5024 .help_in1 = in1_##I1, \
5025 .help_in2 = in2_##I2, \
5026 .help_prep = prep_##P, \
5027 .help_wout = wout_##W, \
5028 .help_cout = cout_##CC, \
5029 .help_op = op_##OP, \
5033 /* Allow 0 to be used for NULL in the table below. */
5041 #define SPEC_in1_0 0
5042 #define SPEC_in2_0 0
5043 #define SPEC_prep_0 0
5044 #define SPEC_wout_0 0
5046 static const DisasInsn insn_info
[] = {
5047 #include "insn-data.def"
5051 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5052 case OPC: return &insn_info[insn_ ## NM];
5054 static const DisasInsn
*lookup_opc(uint16_t opc
)
5057 #include "insn-data.def"
5066 /* Extract a field from the insn. The INSN should be left-aligned in
5067 the uint64_t so that we can more easily utilize the big-bit-endian
5068 definitions we extract from the Principals of Operation. */
5070 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5078 /* Zero extract the field from the insn. */
5079 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5081 /* Sign-extend, or un-swap the field as necessary. */
5083 case 0: /* unsigned */
5085 case 1: /* signed */
5086 assert(f
->size
<= 32);
5087 m
= 1u << (f
->size
- 1);
5090 case 2: /* dl+dh split, signed 20 bit. */
5091 r
= ((int8_t)r
<< 12) | (r
>> 8);
5097 /* Validate that the "compressed" encoding we selected above is valid.
5098 I.e. we havn't make two different original fields overlap. */
5099 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5100 o
->presentC
|= 1 << f
->indexC
;
5101 o
->presentO
|= 1 << f
->indexO
;
5103 o
->c
[f
->indexC
] = r
;
5106 /* Lookup the insn at the current PC, extracting the operands into O and
5107 returning the info struct for the insn. Returns NULL for invalid insn. */
5109 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5112 uint64_t insn
, pc
= s
->pc
;
5114 const DisasInsn
*info
;
5116 insn
= ld_code2(env
, pc
);
5117 op
= (insn
>> 8) & 0xff;
5118 ilen
= get_ilen(op
);
5119 s
->next_pc
= s
->pc
+ ilen
;
5126 insn
= ld_code4(env
, pc
) << 32;
5129 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5135 /* We can't actually determine the insn format until we've looked up
5136 the full insn opcode. Which we can't do without locating the
5137 secondary opcode. Assume by default that OP2 is at bit 40; for
5138 those smaller insns that don't actually have a secondary opcode
5139 this will correctly result in OP2 = 0. */
5145 case 0xb2: /* S, RRF, RRE */
5146 case 0xb3: /* RRE, RRD, RRF */
5147 case 0xb9: /* RRE, RRF */
5148 case 0xe5: /* SSE, SIL */
5149 op2
= (insn
<< 8) >> 56;
5153 case 0xc0: /* RIL */
5154 case 0xc2: /* RIL */
5155 case 0xc4: /* RIL */
5156 case 0xc6: /* RIL */
5157 case 0xc8: /* SSF */
5158 case 0xcc: /* RIL */
5159 op2
= (insn
<< 12) >> 60;
5161 case 0xd0 ... 0xdf: /* SS */
5167 case 0xee ... 0xf3: /* SS */
5168 case 0xf8 ... 0xfd: /* SS */
5172 op2
= (insn
<< 40) >> 56;
5176 memset(f
, 0, sizeof(*f
));
5181 /* Lookup the instruction. */
5182 info
= lookup_opc(op
<< 8 | op2
);
5184 /* If we found it, extract the operands. */
5186 DisasFormat fmt
= info
->fmt
;
5189 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5190 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5196 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5198 const DisasInsn
*insn
;
5199 ExitStatus ret
= NO_EXIT
;
5203 /* Search for the insn in the table. */
5204 insn
= extract_insn(env
, s
, &f
);
5206 /* Not found means unimplemented/illegal opcode. */
5208 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5210 gen_illegal_opcode(s
);
5211 return EXIT_NORETURN
;
5214 #ifndef CONFIG_USER_ONLY
5215 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5216 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5217 gen_helper_per_ifetch(cpu_env
, addr
);
5218 tcg_temp_free_i64(addr
);
5222 /* Check for insn specification exceptions. */
5224 int spec
= insn
->spec
, excp
= 0, r
;
5226 if (spec
& SPEC_r1_even
) {
5227 r
= get_field(&f
, r1
);
5229 excp
= PGM_SPECIFICATION
;
5232 if (spec
& SPEC_r2_even
) {
5233 r
= get_field(&f
, r2
);
5235 excp
= PGM_SPECIFICATION
;
5238 if (spec
& SPEC_r3_even
) {
5239 r
= get_field(&f
, r3
);
5241 excp
= PGM_SPECIFICATION
;
5244 if (spec
& SPEC_r1_f128
) {
5245 r
= get_field(&f
, r1
);
5247 excp
= PGM_SPECIFICATION
;
5250 if (spec
& SPEC_r2_f128
) {
5251 r
= get_field(&f
, r2
);
5253 excp
= PGM_SPECIFICATION
;
5257 gen_program_exception(s
, excp
);
5258 return EXIT_NORETURN
;
5262 /* Set up the strutures we use to communicate with the helpers. */
5265 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5266 TCGV_UNUSED_I64(o
.out
);
5267 TCGV_UNUSED_I64(o
.out2
);
5268 TCGV_UNUSED_I64(o
.in1
);
5269 TCGV_UNUSED_I64(o
.in2
);
5270 TCGV_UNUSED_I64(o
.addr1
);
5272 /* Implement the instruction. */
5273 if (insn
->help_in1
) {
5274 insn
->help_in1(s
, &f
, &o
);
5276 if (insn
->help_in2
) {
5277 insn
->help_in2(s
, &f
, &o
);
5279 if (insn
->help_prep
) {
5280 insn
->help_prep(s
, &f
, &o
);
5282 if (insn
->help_op
) {
5283 ret
= insn
->help_op(s
, &o
);
5285 if (insn
->help_wout
) {
5286 insn
->help_wout(s
, &f
, &o
);
5288 if (insn
->help_cout
) {
5289 insn
->help_cout(s
, &o
);
5292 /* Free any temporaries created by the helpers. */
5293 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5294 tcg_temp_free_i64(o
.out
);
5296 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5297 tcg_temp_free_i64(o
.out2
);
5299 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5300 tcg_temp_free_i64(o
.in1
);
5302 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5303 tcg_temp_free_i64(o
.in2
);
5305 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5306 tcg_temp_free_i64(o
.addr1
);
5309 #ifndef CONFIG_USER_ONLY
5310 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5311 /* An exception might be triggered, save PSW if not already done. */
5312 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5313 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5319 /* Call the helper to check for a possible PER exception. */
5320 gen_helper_per_check_exception(cpu_env
);
5324 /* Advance to the next instruction. */
5329 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5331 S390CPU
*cpu
= s390_env_get_cpu(env
);
5332 CPUState
*cs
= CPU(cpu
);
5334 target_ulong pc_start
;
5335 uint64_t next_page_start
;
5336 int num_insns
, max_insns
;
5343 if (!(tb
->flags
& FLAG_MASK_64
)) {
5344 pc_start
&= 0x7fffffff;
5349 dc
.cc_op
= CC_OP_DYNAMIC
;
5350 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5352 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5355 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5356 if (max_insns
== 0) {
5357 max_insns
= CF_COUNT_MASK
;
5359 if (max_insns
> TCG_MAX_INSNS
) {
5360 max_insns
= TCG_MAX_INSNS
;
5366 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5369 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5370 status
= EXIT_PC_STALE
;
5372 /* The address covered by the breakpoint must be included in
5373 [tb->pc, tb->pc + tb->size) in order to for it to be
5374 properly cleared -- thus we increment the PC here so that
5375 the logic setting tb->size below does the right thing. */
5380 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5385 if (status
== NO_EXIT
) {
5386 status
= translate_one(env
, &dc
);
5389 /* If we reach a page boundary, are single stepping,
5390 or exhaust instruction count, stop generation. */
5391 if (status
== NO_EXIT
5392 && (dc
.pc
>= next_page_start
5393 || tcg_op_buf_full()
5394 || num_insns
>= max_insns
5396 || cs
->singlestep_enabled
)) {
5397 status
= EXIT_PC_STALE
;
5399 } while (status
== NO_EXIT
);
5401 if (tb
->cflags
& CF_LAST_IO
) {
5410 update_psw_addr(&dc
);
5412 case EXIT_PC_UPDATED
:
5413 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5414 cc op type is in env */
5416 /* Exit the TB, either by raising a debug exception or by return. */
5418 gen_exception(EXCP_DEBUG
);
5427 gen_tb_end(tb
, num_insns
);
5429 tb
->size
= dc
.pc
- pc_start
;
5430 tb
->icount
= num_insns
;
5432 #if defined(S390X_DEBUG_DISAS)
5433 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5434 && qemu_log_in_addr_range(pc_start
)) {
5435 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5436 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5442 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5445 int cc_op
= data
[1];
5446 env
->psw
.addr
= data
[0];
5447 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {