 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
62 bool singlestep_enabled
;
65 /* Information carried about a condition to be evaluated. */
72 struct { TCGv_i64 a
, b
; } s64
;
73 struct { TCGv_i32 a
, b
; } s32
;
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
97 S390CPU
*cpu
= S390_CPU(cs
);
98 CPUS390XState
*env
= &cpu
->env
;
101 if (env
->cc_op
> 3) {
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
103 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
105 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
106 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
109 for (i
= 0; i
< 16; i
++) {
110 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
112 cpu_fprintf(f
, "\n");
118 for (i
= 0; i
< 16; i
++) {
119 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
121 cpu_fprintf(f
, "\n");
127 for (i
= 0; i
< 32; i
++) {
128 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
129 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
130 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i
= 0; i
< 16; i
++) {
135 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
137 cpu_fprintf(f
, "\n");
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i
= 0; i
< CC_OP_MAX
; i
++) {
146 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
147 inline_branch_miss
[i
], inline_branch_hit
[i
]);
151 cpu_fprintf(f
, "\n");
154 static TCGv_i64 psw_addr
;
155 static TCGv_i64 psw_mask
;
156 static TCGv_i64 gbea
;
158 static TCGv_i32 cc_op
;
159 static TCGv_i64 cc_src
;
160 static TCGv_i64 cc_dst
;
161 static TCGv_i64 cc_vr
;
163 static char cpu_reg_names
[32][4];
164 static TCGv_i64 regs
[16];
165 static TCGv_i64 fregs
[16];
167 void s390x_translate_init(void)
171 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
172 tcg_ctx
.tcg_env
= cpu_env
;
173 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
174 offsetof(CPUS390XState
, psw
.addr
),
176 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
177 offsetof(CPUS390XState
, psw
.mask
),
179 gbea
= tcg_global_mem_new_i64(cpu_env
,
180 offsetof(CPUS390XState
, gbea
),
183 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
185 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
187 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
189 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
192 for (i
= 0; i
< 16; i
++) {
193 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
194 regs
[i
] = tcg_global_mem_new(cpu_env
,
195 offsetof(CPUS390XState
, regs
[i
]),
199 for (i
= 0; i
< 16; i
++) {
200 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
201 fregs
[i
] = tcg_global_mem_new(cpu_env
,
202 offsetof(CPUS390XState
, vregs
[i
][0].d
),
203 cpu_reg_names
[i
+ 16]);
207 static TCGv_i64
load_reg(int reg
)
209 TCGv_i64 r
= tcg_temp_new_i64();
210 tcg_gen_mov_i64(r
, regs
[reg
]);
214 static TCGv_i64
load_freg32_i64(int reg
)
216 TCGv_i64 r
= tcg_temp_new_i64();
217 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
221 static void store_reg(int reg
, TCGv_i64 v
)
223 tcg_gen_mov_i64(regs
[reg
], v
);
226 static void store_freg(int reg
, TCGv_i64 v
)
228 tcg_gen_mov_i64(fregs
[reg
], v
);
231 static void store_reg32_i64(int reg
, TCGv_i64 v
)
233 /* 32 bit register writes keep the upper half */
234 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
237 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
239 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
242 static void store_freg32_i64(int reg
, TCGv_i64 v
)
244 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
247 static void return_low128(TCGv_i64 dest
)
249 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
252 static void update_psw_addr(DisasContext
*s
)
255 tcg_gen_movi_i64(psw_addr
, s
->pc
);
258 static void per_branch(DisasContext
*s
, bool to_next
)
260 #ifndef CONFIG_USER_ONLY
261 tcg_gen_movi_i64(gbea
, s
->pc
);
263 if (s
->tb
->flags
& FLAG_MASK_PER
) {
264 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
265 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
267 tcg_temp_free_i64(next_pc
);
273 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
274 TCGv_i64 arg1
, TCGv_i64 arg2
)
276 #ifndef CONFIG_USER_ONLY
277 if (s
->tb
->flags
& FLAG_MASK_PER
) {
278 TCGLabel
*lab
= gen_new_label();
279 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
281 tcg_gen_movi_i64(gbea
, s
->pc
);
282 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
286 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
287 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
288 tcg_temp_free_i64(pc
);
293 static void per_breaking_event(DisasContext
*s
)
295 tcg_gen_movi_i64(gbea
, s
->pc
);
298 static void update_cc_op(DisasContext
*s
)
300 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
301 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
305 static void potential_page_fault(DisasContext
*s
)
311 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
313 return (uint64_t)cpu_lduw_code(env
, pc
);
316 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
321 static int get_mem_index(DisasContext
*s
)
323 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
324 case PSW_ASC_PRIMARY
>> 32:
326 case PSW_ASC_SECONDARY
>> 32:
328 case PSW_ASC_HOME
>> 32:
336 static void gen_exception(int excp
)
338 TCGv_i32 tmp
= tcg_const_i32(excp
);
339 gen_helper_exception(cpu_env
, tmp
);
340 tcg_temp_free_i32(tmp
);
343 static void gen_program_exception(DisasContext
*s
, int code
)
347 /* Remember what pgm exeption this was. */
348 tmp
= tcg_const_i32(code
);
349 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
350 tcg_temp_free_i32(tmp
);
352 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
353 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
354 tcg_temp_free_i32(tmp
);
356 /* Advance past instruction. */
363 /* Trigger exception. */
364 gen_exception(EXCP_PGM
);
367 static inline void gen_illegal_opcode(DisasContext
*s
)
369 gen_program_exception(s
, PGM_OPERATION
);
372 static inline void gen_trap(DisasContext
*s
)
376 /* Set DXC to 0xff. */
377 t
= tcg_temp_new_i32();
378 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
379 tcg_gen_ori_i32(t
, t
, 0xff00);
380 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
381 tcg_temp_free_i32(t
);
383 gen_program_exception(s
, PGM_DATA
);
386 #ifndef CONFIG_USER_ONLY
387 static void check_privileged(DisasContext
*s
)
389 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
390 gen_program_exception(s
, PGM_PRIVILEGED
);
395 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
397 TCGv_i64 tmp
= tcg_temp_new_i64();
398 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
400 /* Note that d2 is limited to 20 bits, signed. If we crop negative
401 displacements early we create larger immedate addends. */
403 /* Note that addi optimizes the imm==0 case. */
405 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
406 tcg_gen_addi_i64(tmp
, tmp
, d2
);
408 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
410 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
416 tcg_gen_movi_i64(tmp
, d2
);
419 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
425 static inline bool live_cc_data(DisasContext
*s
)
427 return (s
->cc_op
!= CC_OP_DYNAMIC
428 && s
->cc_op
!= CC_OP_STATIC
432 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
434 if (live_cc_data(s
)) {
435 tcg_gen_discard_i64(cc_src
);
436 tcg_gen_discard_i64(cc_dst
);
437 tcg_gen_discard_i64(cc_vr
);
439 s
->cc_op
= CC_OP_CONST0
+ val
;
442 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
444 if (live_cc_data(s
)) {
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_discard_i64(cc_vr
);
448 tcg_gen_mov_i64(cc_dst
, dst
);
452 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
455 if (live_cc_data(s
)) {
456 tcg_gen_discard_i64(cc_vr
);
458 tcg_gen_mov_i64(cc_src
, src
);
459 tcg_gen_mov_i64(cc_dst
, dst
);
463 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
464 TCGv_i64 dst
, TCGv_i64 vr
)
466 tcg_gen_mov_i64(cc_src
, src
);
467 tcg_gen_mov_i64(cc_dst
, dst
);
468 tcg_gen_mov_i64(cc_vr
, vr
);
472 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
474 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
477 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
479 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
482 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
484 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
487 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
489 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
492 /* CC value is in env->cc_op */
493 static void set_cc_static(DisasContext
*s
)
495 if (live_cc_data(s
)) {
496 tcg_gen_discard_i64(cc_src
);
497 tcg_gen_discard_i64(cc_dst
);
498 tcg_gen_discard_i64(cc_vr
);
500 s
->cc_op
= CC_OP_STATIC
;
503 /* calculates cc into cc_op */
504 static void gen_op_calc_cc(DisasContext
*s
)
506 TCGv_i32 local_cc_op
;
509 TCGV_UNUSED_I32(local_cc_op
);
510 TCGV_UNUSED_I64(dummy
);
513 dummy
= tcg_const_i64(0);
527 local_cc_op
= tcg_const_i32(s
->cc_op
);
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
547 /* env->cc_op already is the cc value */
562 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
567 case CC_OP_LTUGTU_32
:
568 case CC_OP_LTUGTU_64
:
575 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
590 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
593 /* unknown operation - assume 3 arguments and cc_op in env */
594 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
600 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
601 tcg_temp_free_i32(local_cc_op
);
603 if (!TCGV_IS_UNUSED_I64(dummy
)) {
604 tcg_temp_free_i64(dummy
);
607 /* We now have cc in cc_op as constant */
611 static bool use_exit_tb(DisasContext
*s
)
613 return (s
->singlestep_enabled
||
614 (s
->tb
->cflags
& CF_LAST_IO
) ||
615 (s
->tb
->flags
& FLAG_MASK_PER
));
618 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
620 if (unlikely(use_exit_tb(s
))) {
623 #ifndef CONFIG_USER_ONLY
624 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
625 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
631 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
633 #ifdef DEBUG_INLINE_BRANCHES
634 inline_branch_miss
[cc_op
]++;
638 static void account_inline_branch(DisasContext
*s
, int cc_op
)
640 #ifdef DEBUG_INLINE_BRANCHES
641 inline_branch_hit
[cc_op
]++;
645 /* Table of mask values to comparison codes, given a comparison as input.
646 For such, CC=3 should not be possible. */
647 static const TCGCond ltgt_cond
[16] = {
648 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
649 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
650 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
651 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
652 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
653 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
654 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
655 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
658 /* Table of mask values to comparison codes, given a logic op as input.
659 For such, only CC=0 and CC=1 should be possible. */
660 static const TCGCond nz_cond
[16] = {
661 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
662 TCG_COND_NEVER
, TCG_COND_NEVER
,
663 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
664 TCG_COND_NE
, TCG_COND_NE
,
665 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
666 TCG_COND_EQ
, TCG_COND_EQ
,
667 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
668 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
671 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
672 details required to generate a TCG comparison. */
673 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
676 enum cc_op old_cc_op
= s
->cc_op
;
678 if (mask
== 15 || mask
== 0) {
679 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
682 c
->g1
= c
->g2
= true;
687 /* Find the TCG condition for the mask + cc op. */
693 cond
= ltgt_cond
[mask
];
694 if (cond
== TCG_COND_NEVER
) {
697 account_inline_branch(s
, old_cc_op
);
700 case CC_OP_LTUGTU_32
:
701 case CC_OP_LTUGTU_64
:
702 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
703 if (cond
== TCG_COND_NEVER
) {
706 account_inline_branch(s
, old_cc_op
);
710 cond
= nz_cond
[mask
];
711 if (cond
== TCG_COND_NEVER
) {
714 account_inline_branch(s
, old_cc_op
);
729 account_inline_branch(s
, old_cc_op
);
744 account_inline_branch(s
, old_cc_op
);
748 switch (mask
& 0xa) {
749 case 8: /* src == 0 -> no one bit found */
752 case 2: /* src != 0 -> one bit found */
758 account_inline_branch(s
, old_cc_op
);
764 case 8 | 2: /* vr == 0 */
767 case 4 | 1: /* vr != 0 */
770 case 8 | 4: /* no carry -> vr >= src */
773 case 2 | 1: /* carry -> vr < src */
779 account_inline_branch(s
, old_cc_op
);
784 /* Note that CC=0 is impossible; treat it as dont-care. */
786 case 2: /* zero -> op1 == op2 */
789 case 4 | 1: /* !zero -> op1 != op2 */
792 case 4: /* borrow (!carry) -> op1 < op2 */
795 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
801 account_inline_branch(s
, old_cc_op
);
806 /* Calculate cc value. */
811 /* Jump based on CC. We'll load up the real cond below;
812 the assignment here merely avoids a compiler warning. */
813 account_noninline_branch(s
, old_cc_op
);
814 old_cc_op
= CC_OP_STATIC
;
815 cond
= TCG_COND_NEVER
;
819 /* Load up the arguments of the comparison. */
821 c
->g1
= c
->g2
= false;
825 c
->u
.s32
.a
= tcg_temp_new_i32();
826 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
827 c
->u
.s32
.b
= tcg_const_i32(0);
830 case CC_OP_LTUGTU_32
:
833 c
->u
.s32
.a
= tcg_temp_new_i32();
834 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
835 c
->u
.s32
.b
= tcg_temp_new_i32();
836 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
843 c
->u
.s64
.b
= tcg_const_i64(0);
847 case CC_OP_LTUGTU_64
:
851 c
->g1
= c
->g2
= true;
857 c
->u
.s64
.a
= tcg_temp_new_i64();
858 c
->u
.s64
.b
= tcg_const_i64(0);
859 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
864 c
->u
.s32
.a
= tcg_temp_new_i32();
865 c
->u
.s32
.b
= tcg_temp_new_i32();
866 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
867 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
868 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
870 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
877 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
878 c
->u
.s64
.b
= tcg_const_i64(0);
890 case 0x8 | 0x4 | 0x2: /* cc != 3 */
892 c
->u
.s32
.b
= tcg_const_i32(3);
894 case 0x8 | 0x4 | 0x1: /* cc != 2 */
896 c
->u
.s32
.b
= tcg_const_i32(2);
898 case 0x8 | 0x2 | 0x1: /* cc != 1 */
900 c
->u
.s32
.b
= tcg_const_i32(1);
902 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
905 c
->u
.s32
.a
= tcg_temp_new_i32();
906 c
->u
.s32
.b
= tcg_const_i32(0);
907 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
909 case 0x8 | 0x4: /* cc < 2 */
911 c
->u
.s32
.b
= tcg_const_i32(2);
913 case 0x8: /* cc == 0 */
915 c
->u
.s32
.b
= tcg_const_i32(0);
917 case 0x4 | 0x2 | 0x1: /* cc != 0 */
919 c
->u
.s32
.b
= tcg_const_i32(0);
921 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
924 c
->u
.s32
.a
= tcg_temp_new_i32();
925 c
->u
.s32
.b
= tcg_const_i32(0);
926 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
928 case 0x4: /* cc == 1 */
930 c
->u
.s32
.b
= tcg_const_i32(1);
932 case 0x2 | 0x1: /* cc > 1 */
934 c
->u
.s32
.b
= tcg_const_i32(1);
936 case 0x2: /* cc == 2 */
938 c
->u
.s32
.b
= tcg_const_i32(2);
940 case 0x1: /* cc == 3 */
942 c
->u
.s32
.b
= tcg_const_i32(3);
945 /* CC is masked by something else: (8 >> cc) & mask. */
948 c
->u
.s32
.a
= tcg_const_i32(8);
949 c
->u
.s32
.b
= tcg_const_i32(0);
950 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
951 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
962 static void free_compare(DisasCompare
*c
)
966 tcg_temp_free_i64(c
->u
.s64
.a
);
968 tcg_temp_free_i32(c
->u
.s32
.a
);
973 tcg_temp_free_i64(c
->u
.s64
.b
);
975 tcg_temp_free_i32(c
->u
.s32
.b
);
980 /* ====================================================================== */
981 /* Define the insn format enumeration. */
982 #define F0(N) FMT_##N,
983 #define F1(N, X1) F0(N)
984 #define F2(N, X1, X2) F0(N)
985 #define F3(N, X1, X2, X3) F0(N)
986 #define F4(N, X1, X2, X3, X4) F0(N)
987 #define F5(N, X1, X2, X3, X4, X5) F0(N)
990 #include "insn-format.def"
1000 /* Define a structure to hold the decoded fields. We'll store each inside
1001 an array indexed by an enum. In order to conserve memory, we'll arrange
1002 for fields that do not exist at the same time to overlap, thus the "C"
1003 for compact. For checking purposes there is an "O" for original index
1004 as well that will be applied to availability bitmaps. */
1006 enum DisasFieldIndexO
{
1029 enum DisasFieldIndexC
{
1060 struct DisasFields
{
1064 unsigned presentC
:16;
1065 unsigned int presentO
;
1069 /* This is the way fields are to be accessed out of DisasFields. */
1070 #define have_field(S, F) have_field1((S), FLD_O_##F)
1071 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1073 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1075 return (f
->presentO
>> c
) & 1;
1078 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1079 enum DisasFieldIndexC c
)
1081 assert(have_field1(f
, o
));
1085 /* Describe the layout of each field in each format. */
1086 typedef struct DisasField
{
1088 unsigned int size
:8;
1089 unsigned int type
:2;
1090 unsigned int indexC
:6;
1091 enum DisasFieldIndexO indexO
:8;
1094 typedef struct DisasFormatInfo
{
1095 DisasField op
[NUM_C_FIELD
];
1098 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1099 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1100 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1102 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1105 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1106 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1107 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1108 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1109 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1110 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1111 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1113 #define F0(N) { { } },
1114 #define F1(N, X1) { { X1 } },
1115 #define F2(N, X1, X2) { { X1, X2 } },
1116 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1117 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1118 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1120 static const DisasFormatInfo format_info
[] = {
1121 #include "insn-format.def"
1139 /* Generally, we'll extract operands into this structures, operate upon
1140 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1141 of routines below for more details. */
1143 bool g_out
, g_out2
, g_in1
, g_in2
;
1144 TCGv_i64 out
, out2
, in1
, in2
;
1148 /* Instructions can place constraints on their operands, raising specification
1149 exceptions if they are violated. To make this easy to automate, each "in1",
1150 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1151 of the following, or 0. To make this easy to document, we'll put the
1152 SPEC_<name> defines next to <name>. */
1154 #define SPEC_r1_even 1
1155 #define SPEC_r2_even 2
1156 #define SPEC_r3_even 4
1157 #define SPEC_r1_f128 8
1158 #define SPEC_r2_f128 16
1160 /* Return values from translate_one, indicating the state of the TB. */
1162 /* Continue the TB. */
1164 /* We have emitted one or more goto_tb. No fixup required. */
1166 /* We are not using a goto_tb (for whatever reason), but have updated
1167 the PC (for whatever reason), so there's no need to do it again on
1170 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1171 updated the PC for the next instruction to be executed. */
1173 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1174 No following code will be executed. */
1178 typedef enum DisasFacility
{
1179 FAC_Z
, /* zarch (default) */
1180 FAC_CASS
, /* compare and swap and store */
1181 FAC_CASS2
, /* compare and swap and store 2*/
1182 FAC_DFP
, /* decimal floating point */
1183 FAC_DFPR
, /* decimal floating point rounding */
1184 FAC_DO
, /* distinct operands */
1185 FAC_EE
, /* execute extensions */
1186 FAC_EI
, /* extended immediate */
1187 FAC_FPE
, /* floating point extension */
1188 FAC_FPSSH
, /* floating point support sign handling */
1189 FAC_FPRGR
, /* FPR-GR transfer */
1190 FAC_GIE
, /* general instructions extension */
1191 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1192 FAC_HW
, /* high-word */
1193 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1194 FAC_MIE
, /* miscellaneous-instruction-extensions */
1195 FAC_LAT
, /* load-and-trap */
1196 FAC_LOC
, /* load/store on condition */
1197 FAC_LD
, /* long displacement */
1198 FAC_PC
, /* population count */
1199 FAC_SCF
, /* store clock fast */
1200 FAC_SFLE
, /* store facility list extended */
1201 FAC_ILA
, /* interlocked access facility 1 */
1202 FAC_LPP
, /* load-program-parameter */
1208 DisasFacility fac
:8;
1213 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1214 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1215 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1216 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1217 void (*help_cout
)(DisasContext
*, DisasOps
*);
1218 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1223 /* ====================================================================== */
1224 /* Miscellaneous helpers, used by several operations. */
1226 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1227 DisasOps
*o
, int mask
)
1229 int b2
= get_field(f
, b2
);
1230 int d2
= get_field(f
, d2
);
1233 o
->in2
= tcg_const_i64(d2
& mask
);
1235 o
->in2
= get_address(s
, 0, b2
, d2
);
1236 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1240 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1242 if (dest
== s
->next_pc
) {
1243 per_branch(s
, true);
1246 if (use_goto_tb(s
, dest
)) {
1248 per_breaking_event(s
);
1250 tcg_gen_movi_i64(psw_addr
, dest
);
1251 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1252 return EXIT_GOTO_TB
;
1254 tcg_gen_movi_i64(psw_addr
, dest
);
1255 per_branch(s
, false);
1256 return EXIT_PC_UPDATED
;
1260 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1261 bool is_imm
, int imm
, TCGv_i64 cdest
)
1264 uint64_t dest
= s
->pc
+ 2 * imm
;
1267 /* Take care of the special cases first. */
1268 if (c
->cond
== TCG_COND_NEVER
) {
1273 if (dest
== s
->next_pc
) {
1274 /* Branch to next. */
1275 per_branch(s
, true);
1279 if (c
->cond
== TCG_COND_ALWAYS
) {
1280 ret
= help_goto_direct(s
, dest
);
1284 if (TCGV_IS_UNUSED_I64(cdest
)) {
1285 /* E.g. bcr %r0 -> no branch. */
1289 if (c
->cond
== TCG_COND_ALWAYS
) {
1290 tcg_gen_mov_i64(psw_addr
, cdest
);
1291 per_branch(s
, false);
1292 ret
= EXIT_PC_UPDATED
;
1297 if (use_goto_tb(s
, s
->next_pc
)) {
1298 if (is_imm
&& use_goto_tb(s
, dest
)) {
1299 /* Both exits can use goto_tb. */
1302 lab
= gen_new_label();
1304 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1306 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1309 /* Branch not taken. */
1311 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1312 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1316 per_breaking_event(s
);
1318 tcg_gen_movi_i64(psw_addr
, dest
);
1319 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1323 /* Fallthru can use goto_tb, but taken branch cannot. */
1324 /* Store taken branch destination before the brcond. This
1325 avoids having to allocate a new local temp to hold it.
1326 We'll overwrite this in the not taken case anyway. */
1328 tcg_gen_mov_i64(psw_addr
, cdest
);
1331 lab
= gen_new_label();
1333 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1335 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1338 /* Branch not taken. */
1341 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1342 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1346 tcg_gen_movi_i64(psw_addr
, dest
);
1348 per_breaking_event(s
);
1349 ret
= EXIT_PC_UPDATED
;
1352 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1353 Most commonly we're single-stepping or some other condition that
1354 disables all use of goto_tb. Just update the PC and exit. */
1356 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1358 cdest
= tcg_const_i64(dest
);
1362 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1364 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1366 TCGv_i32 t0
= tcg_temp_new_i32();
1367 TCGv_i64 t1
= tcg_temp_new_i64();
1368 TCGv_i64 z
= tcg_const_i64(0);
1369 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1370 tcg_gen_extu_i32_i64(t1
, t0
);
1371 tcg_temp_free_i32(t0
);
1372 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1373 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1374 tcg_temp_free_i64(t1
);
1375 tcg_temp_free_i64(z
);
1379 tcg_temp_free_i64(cdest
);
1381 tcg_temp_free_i64(next
);
1383 ret
= EXIT_PC_UPDATED
;
1391 /* ====================================================================== */
1392 /* The operations. These perform the bulk of the work for any insn,
1393 usually after the operands have been loaded and output initialized. */
1395 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1398 z
= tcg_const_i64(0);
1399 n
= tcg_temp_new_i64();
1400 tcg_gen_neg_i64(n
, o
->in2
);
1401 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1402 tcg_temp_free_i64(n
);
1403 tcg_temp_free_i64(z
);
1407 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1409 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1413 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1415 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1419 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1421 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1422 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1426 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1428 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1432 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1437 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1439 /* The carry flag is the msb of CC, therefore the branch mask that would
1440 create that comparison is 3. Feeding the generated comparison to
1441 setcond produces the carry flag that we desire. */
1442 disas_jcc(s
, &cmp
, 3);
1443 carry
= tcg_temp_new_i64();
1445 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1447 TCGv_i32 t
= tcg_temp_new_i32();
1448 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1449 tcg_gen_extu_i32_i64(carry
, t
);
1450 tcg_temp_free_i32(t
);
1454 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1455 tcg_temp_free_i64(carry
);
1459 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1461 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1465 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1467 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1471 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1473 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1474 return_low128(o
->out2
);
1478 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1480 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1484 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1486 int shift
= s
->insn
->data
& 0xff;
1487 int size
= s
->insn
->data
>> 8;
1488 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1491 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1492 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1493 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1495 /* Produce the CC from only the bits manipulated. */
1496 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1497 set_cc_nz_u64(s
, cc_dst
);
1501 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1503 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1504 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1505 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1506 per_branch(s
, false);
1507 return EXIT_PC_UPDATED
;
1513 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1515 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1516 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1519 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1521 int m1
= get_field(s
->fields
, m1
);
1522 bool is_imm
= have_field(s
->fields
, i2
);
1523 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1526 /* BCR with R2 = 0 causes no branching */
1527 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1529 /* Perform serialization */
1530 /* FIXME: check for fast-BCR-serialization facility */
1531 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1534 /* Perform serialization */
1535 /* FIXME: perform checkpoint-synchronisation */
1536 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1541 disas_jcc(s
, &c
, m1
);
1542 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1545 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1547 int r1
= get_field(s
->fields
, r1
);
1548 bool is_imm
= have_field(s
->fields
, i2
);
1549 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1553 c
.cond
= TCG_COND_NE
;
1558 t
= tcg_temp_new_i64();
1559 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1560 store_reg32_i64(r1
, t
);
1561 c
.u
.s32
.a
= tcg_temp_new_i32();
1562 c
.u
.s32
.b
= tcg_const_i32(0);
1563 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1564 tcg_temp_free_i64(t
);
1566 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1569 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1571 int r1
= get_field(s
->fields
, r1
);
1572 int imm
= get_field(s
->fields
, i2
);
1576 c
.cond
= TCG_COND_NE
;
1581 t
= tcg_temp_new_i64();
1582 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1583 tcg_gen_subi_i64(t
, t
, 1);
1584 store_reg32h_i64(r1
, t
);
1585 c
.u
.s32
.a
= tcg_temp_new_i32();
1586 c
.u
.s32
.b
= tcg_const_i32(0);
1587 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1588 tcg_temp_free_i64(t
);
1590 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1593 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1595 int r1
= get_field(s
->fields
, r1
);
1596 bool is_imm
= have_field(s
->fields
, i2
);
1597 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1600 c
.cond
= TCG_COND_NE
;
1605 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1606 c
.u
.s64
.a
= regs
[r1
];
1607 c
.u
.s64
.b
= tcg_const_i64(0);
1609 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1612 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1614 int r1
= get_field(s
->fields
, r1
);
1615 int r3
= get_field(s
->fields
, r3
);
1616 bool is_imm
= have_field(s
->fields
, i2
);
1617 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1621 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1626 t
= tcg_temp_new_i64();
1627 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1628 c
.u
.s32
.a
= tcg_temp_new_i32();
1629 c
.u
.s32
.b
= tcg_temp_new_i32();
1630 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1631 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1632 store_reg32_i64(r1
, t
);
1633 tcg_temp_free_i64(t
);
1635 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1638 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1640 int r1
= get_field(s
->fields
, r1
);
1641 int r3
= get_field(s
->fields
, r3
);
1642 bool is_imm
= have_field(s
->fields
, i2
);
1643 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1646 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1649 if (r1
== (r3
| 1)) {
1650 c
.u
.s64
.b
= load_reg(r3
| 1);
1653 c
.u
.s64
.b
= regs
[r3
| 1];
1657 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1658 c
.u
.s64
.a
= regs
[r1
];
1661 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1664 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1666 int imm
, m3
= get_field(s
->fields
, m3
);
1670 c
.cond
= ltgt_cond
[m3
];
1671 if (s
->insn
->data
) {
1672 c
.cond
= tcg_unsigned_cond(c
.cond
);
1674 c
.is_64
= c
.g1
= c
.g2
= true;
1678 is_imm
= have_field(s
->fields
, i4
);
1680 imm
= get_field(s
->fields
, i4
);
1683 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1684 get_field(s
->fields
, d4
));
1687 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1690 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1692 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1697 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1699 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1704 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1706 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1711 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1713 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1714 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1715 tcg_temp_free_i32(m3
);
1716 gen_set_cc_nz_f32(s
, o
->in2
);
1720 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1722 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1723 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1724 tcg_temp_free_i32(m3
);
1725 gen_set_cc_nz_f64(s
, o
->in2
);
1729 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1731 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1732 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1733 tcg_temp_free_i32(m3
);
1734 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1738 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1740 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1741 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1742 tcg_temp_free_i32(m3
);
1743 gen_set_cc_nz_f32(s
, o
->in2
);
1747 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1749 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1750 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1751 tcg_temp_free_i32(m3
);
1752 gen_set_cc_nz_f64(s
, o
->in2
);
1756 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1758 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1759 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1760 tcg_temp_free_i32(m3
);
1761 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1765 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1767 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1768 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1769 tcg_temp_free_i32(m3
);
1770 gen_set_cc_nz_f32(s
, o
->in2
);
1774 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1776 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1777 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1778 tcg_temp_free_i32(m3
);
1779 gen_set_cc_nz_f64(s
, o
->in2
);
1783 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1785 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1786 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1787 tcg_temp_free_i32(m3
);
1788 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1792 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1794 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1795 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1796 tcg_temp_free_i32(m3
);
1797 gen_set_cc_nz_f32(s
, o
->in2
);
1801 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1803 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1804 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1805 tcg_temp_free_i32(m3
);
1806 gen_set_cc_nz_f64(s
, o
->in2
);
1810 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1812 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1813 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1814 tcg_temp_free_i32(m3
);
1815 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1819 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1821 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1822 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1823 tcg_temp_free_i32(m3
);
1827 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1829 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1830 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1831 tcg_temp_free_i32(m3
);
1835 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1837 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1838 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1839 tcg_temp_free_i32(m3
);
1840 return_low128(o
->out2
);
1844 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1846 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1847 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1848 tcg_temp_free_i32(m3
);
1852 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1854 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1855 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1856 tcg_temp_free_i32(m3
);
1860 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1862 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1863 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1864 tcg_temp_free_i32(m3
);
1865 return_low128(o
->out2
);
1869 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1871 int r2
= get_field(s
->fields
, r2
);
1872 TCGv_i64 len
= tcg_temp_new_i64();
1874 potential_page_fault(s
);
1875 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1877 return_low128(o
->out
);
1879 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1880 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1881 tcg_temp_free_i64(len
);
1886 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1888 int l
= get_field(s
->fields
, l1
);
1893 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1894 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1897 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1898 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1901 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1902 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1905 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1906 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1909 vl
= tcg_const_i32(l
);
1910 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1911 tcg_temp_free_i32(vl
);
1915 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1919 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1921 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1922 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1923 potential_page_fault(s
);
1924 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1925 tcg_temp_free_i32(r1
);
1926 tcg_temp_free_i32(r3
);
1931 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1933 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1934 TCGv_i32 t1
= tcg_temp_new_i32();
1935 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1936 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1938 tcg_temp_free_i32(t1
);
1939 tcg_temp_free_i32(m3
);
1943 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1945 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1947 return_low128(o
->in2
);
1951 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1953 TCGv_i64 t
= tcg_temp_new_i64();
1954 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1955 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1956 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1957 tcg_temp_free_i64(t
);
1961 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1963 int d2
= get_field(s
->fields
, d2
);
1964 int b2
= get_field(s
->fields
, b2
);
1967 /* Note that in1 = R3 (new value) and
1968 in2 = (zero-extended) R1 (expected value). */
1970 addr
= get_address(s
, 0, b2
, d2
);
1971 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1972 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1973 tcg_temp_free_i64(addr
);
1975 /* Are the memory and expected values (un)equal? Note that this setcond
1976 produces the output CC value, thus the NE sense of the test. */
1977 cc
= tcg_temp_new_i64();
1978 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1979 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1980 tcg_temp_free_i64(cc
);
1986 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1988 int r1
= get_field(s
->fields
, r1
);
1989 int r3
= get_field(s
->fields
, r3
);
1990 int d2
= get_field(s
->fields
, d2
);
1991 int b2
= get_field(s
->fields
, b2
);
1993 TCGv_i32 t_r1
, t_r3
;
1995 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1996 addr
= get_address(s
, 0, b2
, d2
);
1997 t_r1
= tcg_const_i32(r1
);
1998 t_r3
= tcg_const_i32(r3
);
1999 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2000 tcg_temp_free_i64(addr
);
2001 tcg_temp_free_i32(t_r1
);
2002 tcg_temp_free_i32(t_r3
);
2008 #ifndef CONFIG_USER_ONLY
2009 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2011 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2012 check_privileged(s
);
2013 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
2014 tcg_temp_free_i32(r1
);
2020 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2022 TCGv_i64 t1
= tcg_temp_new_i64();
2023 TCGv_i32 t2
= tcg_temp_new_i32();
2024 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2025 gen_helper_cvd(t1
, t2
);
2026 tcg_temp_free_i32(t2
);
2027 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2028 tcg_temp_free_i64(t1
);
2032 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2034 int m3
= get_field(s
->fields
, m3
);
2035 TCGLabel
*lab
= gen_new_label();
2038 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2039 if (s
->insn
->data
) {
2040 c
= tcg_unsigned_cond(c
);
2042 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2051 #ifndef CONFIG_USER_ONLY
2052 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2054 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2055 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2056 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2058 check_privileged(s
);
2062 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2064 tcg_temp_free_i32(func_code
);
2065 tcg_temp_free_i32(r3
);
2066 tcg_temp_free_i32(r1
);
2071 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2073 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2074 return_low128(o
->out
);
2078 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2080 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2081 return_low128(o
->out
);
2085 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2087 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2088 return_low128(o
->out
);
2092 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2094 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2095 return_low128(o
->out
);
2099 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2101 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2105 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2107 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2111 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2113 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2114 return_low128(o
->out2
);
2118 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2120 int r2
= get_field(s
->fields
, r2
);
2121 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2125 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2127 /* No cache information provided. */
2128 tcg_gen_movi_i64(o
->out
, -1);
2132 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2134 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2138 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2140 int r1
= get_field(s
->fields
, r1
);
2141 int r2
= get_field(s
->fields
, r2
);
2142 TCGv_i64 t
= tcg_temp_new_i64();
2144 /* Note the "subsequently" in the PoO, which implies a defined result
2145 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2146 tcg_gen_shri_i64(t
, psw_mask
, 32);
2147 store_reg32_i64(r1
, t
);
2149 store_reg32_i64(r2
, psw_mask
);
2152 tcg_temp_free_i64(t
);
2156 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2158 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2159 tb->flags, (ab)use the tb->cs_base field as the address of
2160 the template in memory, and grab 8 bits of tb->flags/cflags for
2161 the contents of the register. We would then recognize all this
2162 in gen_intermediate_code_internal, generating code for exactly
2163 one instruction. This new TB then gets executed normally.
2165 On the other hand, this seems to be mostly used for modifying
2166 MVC inside of memcpy, which needs a helper call anyway. So
2167 perhaps this doesn't bear thinking about any further. */
2174 tmp
= tcg_const_i64(s
->next_pc
);
2175 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2176 tcg_temp_free_i64(tmp
);
2181 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2183 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2184 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2185 tcg_temp_free_i32(m3
);
2189 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2191 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2192 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2193 tcg_temp_free_i32(m3
);
2197 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2199 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2200 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2201 return_low128(o
->out2
);
2202 tcg_temp_free_i32(m3
);
2206 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2208 /* We'll use the original input for cc computation, since we get to
2209 compare that against 0, which ought to be better than comparing
2210 the real output against 64. It also lets cc_dst be a convenient
2211 temporary during our computation. */
2212 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2214 /* R1 = IN ? CLZ(IN) : 64. */
2215 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2217 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2218 value by 64, which is undefined. But since the shift is 64 iff the
2219 input is zero, we still get the correct result after and'ing. */
2220 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2221 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2222 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2226 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2228 int m3
= get_field(s
->fields
, m3
);
2229 int pos
, len
, base
= s
->insn
->data
;
2230 TCGv_i64 tmp
= tcg_temp_new_i64();
2235 /* Effectively a 32-bit load. */
2236 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2243 /* Effectively a 16-bit load. */
2244 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2252 /* Effectively an 8-bit load. */
2253 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2258 pos
= base
+ ctz32(m3
) * 8;
2259 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2260 ccm
= ((1ull << len
) - 1) << pos
;
2264 /* This is going to be a sequence of loads and inserts. */
2265 pos
= base
+ 32 - 8;
2269 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2270 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2271 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2274 m3
= (m3
<< 1) & 0xf;
2280 tcg_gen_movi_i64(tmp
, ccm
);
2281 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2282 tcg_temp_free_i64(tmp
);
2286 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2288 int shift
= s
->insn
->data
& 0xff;
2289 int size
= s
->insn
->data
>> 8;
2290 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2294 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2299 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2301 t1
= tcg_temp_new_i64();
2302 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2303 tcg_gen_shri_i64(t1
, t1
, 36);
2304 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2306 tcg_gen_extu_i32_i64(t1
, cc_op
);
2307 tcg_gen_shli_i64(t1
, t1
, 28);
2308 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2309 tcg_temp_free_i64(t1
);
2313 #ifndef CONFIG_USER_ONLY
2314 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2316 check_privileged(s
);
2317 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2321 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2323 check_privileged(s
);
2324 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2329 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2331 /* The real output is indeed the original value in memory;
2332 recompute the addition for the computation of CC. */
2333 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2334 s
->insn
->data
| MO_ALIGN
);
2335 /* However, we need to recompute the addition for setting CC. */
2336 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2340 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2342 /* The real output is indeed the original value in memory;
2343 recompute the addition for the computation of CC. */
2344 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2345 s
->insn
->data
| MO_ALIGN
);
2346 /* However, we need to recompute the operation for setting CC. */
2347 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2351 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2353 /* The real output is indeed the original value in memory;
2354 recompute the addition for the computation of CC. */
2355 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2356 s
->insn
->data
| MO_ALIGN
);
2357 /* However, we need to recompute the operation for setting CC. */
2358 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2362 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2364 /* The real output is indeed the original value in memory;
2365 recompute the addition for the computation of CC. */
2366 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2367 s
->insn
->data
| MO_ALIGN
);
2368 /* However, we need to recompute the operation for setting CC. */
2369 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2373 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2375 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2379 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2381 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2385 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2387 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2391 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2393 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2397 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2399 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2400 return_low128(o
->out2
);
2404 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2406 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2407 return_low128(o
->out2
);
2411 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2413 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2417 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2419 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2423 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2425 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2429 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2431 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2435 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2437 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2441 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2443 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2447 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2449 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2453 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2455 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2459 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2461 TCGLabel
*lab
= gen_new_label();
2462 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2463 /* The value is stored even in case of trap. */
2464 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2470 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2472 TCGLabel
*lab
= gen_new_label();
2473 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2474 /* The value is stored even in case of trap. */
2475 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2481 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2483 TCGLabel
*lab
= gen_new_label();
2484 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2485 /* The value is stored even in case of trap. */
2486 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2492 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2494 TCGLabel
*lab
= gen_new_label();
2495 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2496 /* The value is stored even in case of trap. */
2497 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2503 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2505 TCGLabel
*lab
= gen_new_label();
2506 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2507 /* The value is stored even in case of trap. */
2508 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2514 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2518 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2521 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2525 TCGv_i32 t32
= tcg_temp_new_i32();
2528 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2531 t
= tcg_temp_new_i64();
2532 tcg_gen_extu_i32_i64(t
, t32
);
2533 tcg_temp_free_i32(t32
);
2535 z
= tcg_const_i64(0);
2536 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2537 tcg_temp_free_i64(t
);
2538 tcg_temp_free_i64(z
);
2544 #ifndef CONFIG_USER_ONLY
2545 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2547 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2548 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2549 check_privileged(s
);
2550 potential_page_fault(s
);
2551 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2552 tcg_temp_free_i32(r1
);
2553 tcg_temp_free_i32(r3
);
2557 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2559 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2560 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2561 check_privileged(s
);
2562 potential_page_fault(s
);
2563 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2564 tcg_temp_free_i32(r1
);
2565 tcg_temp_free_i32(r3
);
2569 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2571 check_privileged(s
);
2572 potential_page_fault(s
);
2573 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2578 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2580 check_privileged(s
);
2582 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2586 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2590 check_privileged(s
);
2591 per_breaking_event(s
);
2593 t1
= tcg_temp_new_i64();
2594 t2
= tcg_temp_new_i64();
2595 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2596 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2597 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2598 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2599 tcg_gen_shli_i64(t1
, t1
, 32);
2600 gen_helper_load_psw(cpu_env
, t1
, t2
);
2601 tcg_temp_free_i64(t1
);
2602 tcg_temp_free_i64(t2
);
2603 return EXIT_NORETURN
;
2606 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2610 check_privileged(s
);
2611 per_breaking_event(s
);
2613 t1
= tcg_temp_new_i64();
2614 t2
= tcg_temp_new_i64();
2615 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2616 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2617 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2618 gen_helper_load_psw(cpu_env
, t1
, t2
);
2619 tcg_temp_free_i64(t1
);
2620 tcg_temp_free_i64(t2
);
2621 return EXIT_NORETURN
;
2625 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2627 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2628 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2629 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2630 tcg_temp_free_i32(r1
);
2631 tcg_temp_free_i32(r3
);
2635 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2637 int r1
= get_field(s
->fields
, r1
);
2638 int r3
= get_field(s
->fields
, r3
);
2641 /* Only one register to read. */
2642 t1
= tcg_temp_new_i64();
2643 if (unlikely(r1
== r3
)) {
2644 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2645 store_reg32_i64(r1
, t1
);
2650 /* First load the values of the first and last registers to trigger
2651 possible page faults. */
2652 t2
= tcg_temp_new_i64();
2653 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2654 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2655 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2656 store_reg32_i64(r1
, t1
);
2657 store_reg32_i64(r3
, t2
);
2659 /* Only two registers to read. */
2660 if (((r1
+ 1) & 15) == r3
) {
2666 /* Then load the remaining registers. Page fault can't occur. */
2668 tcg_gen_movi_i64(t2
, 4);
2671 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2672 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2673 store_reg32_i64(r1
, t1
);
2681 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2683 int r1
= get_field(s
->fields
, r1
);
2684 int r3
= get_field(s
->fields
, r3
);
2687 /* Only one register to read. */
2688 t1
= tcg_temp_new_i64();
2689 if (unlikely(r1
== r3
)) {
2690 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2691 store_reg32h_i64(r1
, t1
);
2696 /* First load the values of the first and last registers to trigger
2697 possible page faults. */
2698 t2
= tcg_temp_new_i64();
2699 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2700 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2701 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2702 store_reg32h_i64(r1
, t1
);
2703 store_reg32h_i64(r3
, t2
);
2705 /* Only two registers to read. */
2706 if (((r1
+ 1) & 15) == r3
) {
2712 /* Then load the remaining registers. Page fault can't occur. */
2714 tcg_gen_movi_i64(t2
, 4);
2717 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2718 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2719 store_reg32h_i64(r1
, t1
);
2727 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2729 int r1
= get_field(s
->fields
, r1
);
2730 int r3
= get_field(s
->fields
, r3
);
2733 /* Only one register to read. */
2734 if (unlikely(r1
== r3
)) {
2735 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2739 /* First load the values of the first and last registers to trigger
2740 possible page faults. */
2741 t1
= tcg_temp_new_i64();
2742 t2
= tcg_temp_new_i64();
2743 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2744 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2745 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2746 tcg_gen_mov_i64(regs
[r1
], t1
);
2749 /* Only two registers to read. */
2750 if (((r1
+ 1) & 15) == r3
) {
2755 /* Then load the remaining registers. Page fault can't occur. */
2757 tcg_gen_movi_i64(t1
, 8);
2760 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2761 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2768 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2771 TCGMemOp mop
= s
->insn
->data
;
2773 /* In a parallel context, stop the world and single step. */
2774 if (parallel_cpus
) {
2775 potential_page_fault(s
);
2776 gen_exception(EXCP_ATOMIC
);
2777 return EXIT_NORETURN
;
2780 /* In a serial context, perform the two loads ... */
2781 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2782 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2783 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2784 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2785 tcg_temp_free_i64(a1
);
2786 tcg_temp_free_i64(a2
);
2788 /* ... and indicate that we performed them while interlocked. */
2789 gen_op_movi_cc(s
, 0);
2793 #ifndef CONFIG_USER_ONLY
2794 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2796 check_privileged(s
);
2797 potential_page_fault(s
);
2798 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2802 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2804 check_privileged(s
);
2805 potential_page_fault(s
);
2806 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2811 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2814 o
->g_out
= o
->g_in2
;
2815 TCGV_UNUSED_I64(o
->in2
);
2820 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2822 int b2
= get_field(s
->fields
, b2
);
2823 TCGv ar1
= tcg_temp_new_i64();
2826 o
->g_out
= o
->g_in2
;
2827 TCGV_UNUSED_I64(o
->in2
);
2830 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2831 case PSW_ASC_PRIMARY
>> 32:
2832 tcg_gen_movi_i64(ar1
, 0);
2834 case PSW_ASC_ACCREG
>> 32:
2835 tcg_gen_movi_i64(ar1
, 1);
2837 case PSW_ASC_SECONDARY
>> 32:
2839 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2841 tcg_gen_movi_i64(ar1
, 0);
2844 case PSW_ASC_HOME
>> 32:
2845 tcg_gen_movi_i64(ar1
, 2);
2849 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2850 tcg_temp_free_i64(ar1
);
2855 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2859 o
->g_out
= o
->g_in1
;
2860 o
->g_out2
= o
->g_in2
;
2861 TCGV_UNUSED_I64(o
->in1
);
2862 TCGV_UNUSED_I64(o
->in2
);
2863 o
->g_in1
= o
->g_in2
= false;
2867 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2869 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2870 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2871 tcg_temp_free_i32(l
);
2875 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2877 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2878 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2879 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2880 tcg_temp_free_i32(r1
);
2881 tcg_temp_free_i32(r2
);
2886 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2888 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2889 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2890 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2891 tcg_temp_free_i32(r1
);
2892 tcg_temp_free_i32(r3
);
2897 #ifndef CONFIG_USER_ONLY
2898 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2900 int r1
= get_field(s
->fields
, l1
);
2901 check_privileged(s
);
2902 potential_page_fault(s
);
2903 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2908 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2910 int r1
= get_field(s
->fields
, l1
);
2911 check_privileged(s
);
2912 potential_page_fault(s
);
2913 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2919 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2921 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2926 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2928 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2930 return_low128(o
->in2
);
2934 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2936 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2940 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2942 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2946 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2948 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2952 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2954 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2958 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2960 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2964 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2966 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2967 return_low128(o
->out2
);
2971 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2973 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2974 return_low128(o
->out2
);
2978 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2980 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2981 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2982 tcg_temp_free_i64(r3
);
2986 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2988 int r3
= get_field(s
->fields
, r3
);
2989 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2993 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2995 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2996 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2997 tcg_temp_free_i64(r3
);
3001 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3003 int r3
= get_field(s
->fields
, r3
);
3004 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3008 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3011 z
= tcg_const_i64(0);
3012 n
= tcg_temp_new_i64();
3013 tcg_gen_neg_i64(n
, o
->in2
);
3014 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3015 tcg_temp_free_i64(n
);
3016 tcg_temp_free_i64(z
);
3020 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3022 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3026 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3028 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3032 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3034 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3035 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3039 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3041 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3042 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3043 tcg_temp_free_i32(l
);
3048 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3050 tcg_gen_neg_i64(o
->out
, o
->in2
);
3054 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3056 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3060 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3062 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3066 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3068 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3069 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3073 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3075 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3076 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3077 tcg_temp_free_i32(l
);
3082 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3084 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3088 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3090 int shift
= s
->insn
->data
& 0xff;
3091 int size
= s
->insn
->data
>> 8;
3092 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3095 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3096 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3098 /* Produce the CC from only the bits manipulated. */
3099 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3100 set_cc_nz_u64(s
, cc_dst
);
3104 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3106 gen_helper_popcnt(o
->out
, o
->in2
);
3110 #ifndef CONFIG_USER_ONLY
3111 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3113 check_privileged(s
);
3114 gen_helper_ptlb(cpu_env
);
3119 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3121 int i3
= get_field(s
->fields
, i3
);
3122 int i4
= get_field(s
->fields
, i4
);
3123 int i5
= get_field(s
->fields
, i5
);
3124 int do_zero
= i4
& 0x80;
3125 uint64_t mask
, imask
, pmask
;
3128 /* Adjust the arguments for the specific insn. */
3129 switch (s
->fields
->op2
) {
3130 case 0x55: /* risbg */
3135 case 0x5d: /* risbhg */
3138 pmask
= 0xffffffff00000000ull
;
3140 case 0x51: /* risblg */
3143 pmask
= 0x00000000ffffffffull
;
3149 /* MASK is the set of bits to be inserted from R2.
3150 Take care for I3/I4 wraparound. */
3153 mask
^= pmask
>> i4
>> 1;
3155 mask
|= ~(pmask
>> i4
>> 1);
3159 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3160 insns, we need to keep the other half of the register. */
3161 imask
= ~mask
| ~pmask
;
3163 if (s
->fields
->op2
== 0x55) {
3173 if (s
->fields
->op2
== 0x5d) {
3177 /* In some cases we can implement this with extract. */
3178 if (imask
== 0 && pos
== 0 && len
> 0 && rot
+ len
<= 64) {
3179 tcg_gen_extract_i64(o
->out
, o
->in2
, rot
, len
);
3183 /* In some cases we can implement this with deposit. */
3184 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3185 /* Note that we rotate the bits to be inserted to the lsb, not to
3186 the position as described in the PoO. */
3187 rot
= (rot
- pos
) & 63;
3192 /* Rotate the input as necessary. */
3193 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3195 /* Insert the selected bits into the output. */
3198 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3200 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3202 } else if (imask
== 0) {
3203 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3205 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3206 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3207 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3212 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3214 int i3
= get_field(s
->fields
, i3
);
3215 int i4
= get_field(s
->fields
, i4
);
3216 int i5
= get_field(s
->fields
, i5
);
3219 /* If this is a test-only form, arrange to discard the result. */
3221 o
->out
= tcg_temp_new_i64();
3229 /* MASK is the set of bits to be operated on from R2.
3230 Take care for I3/I4 wraparound. */
3233 mask
^= ~0ull >> i4
>> 1;
3235 mask
|= ~(~0ull >> i4
>> 1);
3238 /* Rotate the input as necessary. */
3239 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3242 switch (s
->fields
->op2
) {
3243 case 0x55: /* AND */
3244 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3245 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3248 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3249 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3251 case 0x57: /* XOR */
3252 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3253 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3260 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3261 set_cc_nz_u64(s
, cc_dst
);
3265 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3267 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3271 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3273 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3277 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3279 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3283 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3285 TCGv_i32 t1
= tcg_temp_new_i32();
3286 TCGv_i32 t2
= tcg_temp_new_i32();
3287 TCGv_i32 to
= tcg_temp_new_i32();
3288 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3289 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3290 tcg_gen_rotl_i32(to
, t1
, t2
);
3291 tcg_gen_extu_i32_i64(o
->out
, to
);
3292 tcg_temp_free_i32(t1
);
3293 tcg_temp_free_i32(t2
);
3294 tcg_temp_free_i32(to
);
3298 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3300 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3304 #ifndef CONFIG_USER_ONLY
3305 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3307 check_privileged(s
);
3308 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3313 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3315 check_privileged(s
);
3316 gen_helper_sacf(cpu_env
, o
->in2
);
3317 /* Addressing mode has changed, so end the block. */
3318 return EXIT_PC_STALE
;
3322 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3324 int sam
= s
->insn
->data
;
3340 /* Bizarre but true, we check the address of the current insn for the
3341 specification exception, not the next to be executed. Thus the PoO
3342 documents that Bad Things Happen two bytes before the end. */
3343 if (s
->pc
& ~mask
) {
3344 gen_program_exception(s
, PGM_SPECIFICATION
);
3345 return EXIT_NORETURN
;
3349 tsam
= tcg_const_i64(sam
);
3350 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3351 tcg_temp_free_i64(tsam
);
3353 /* Always exit the TB, since we (may have) changed execution mode. */
3354 return EXIT_PC_STALE
;
3357 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3359 int r1
= get_field(s
->fields
, r1
);
3360 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3364 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3366 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3370 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3372 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3376 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3378 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3379 return_low128(o
->out2
);
3383 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3385 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3389 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3391 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3395 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3397 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3398 return_low128(o
->out2
);
3402 #ifndef CONFIG_USER_ONLY
3403 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3405 check_privileged(s
);
3406 potential_page_fault(s
);
3407 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3412 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3414 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3415 check_privileged(s
);
3416 potential_page_fault(s
);
3417 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3419 tcg_temp_free_i32(r1
);
3424 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3431 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3433 /* We want to store when the condition is fulfilled, so branch
3434 out when it's not */
3435 c
.cond
= tcg_invert_cond(c
.cond
);
3437 lab
= gen_new_label();
3439 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3441 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3445 r1
= get_field(s
->fields
, r1
);
3446 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3447 if (s
->insn
->data
) {
3448 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3450 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3452 tcg_temp_free_i64(a
);
3458 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3460 uint64_t sign
= 1ull << s
->insn
->data
;
3461 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3462 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3463 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3464 /* The arithmetic left shift is curious in that it does not affect
3465 the sign bit. Copy that over from the source unchanged. */
3466 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3467 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3468 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3472 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3474 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3478 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3480 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3484 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3486 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3490 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3492 gen_helper_sfpc(cpu_env
, o
->in2
);
3496 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3498 gen_helper_sfas(cpu_env
, o
->in2
);
3502 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3504 int b2
= get_field(s
->fields
, b2
);
3505 int d2
= get_field(s
->fields
, d2
);
3506 TCGv_i64 t1
= tcg_temp_new_i64();
3507 TCGv_i64 t2
= tcg_temp_new_i64();
3510 switch (s
->fields
->op2
) {
3511 case 0x99: /* SRNM */
3514 case 0xb8: /* SRNMB */
3517 case 0xb9: /* SRNMT */
3523 mask
= (1 << len
) - 1;
3525 /* Insert the value into the appropriate field of the FPC. */
3527 tcg_gen_movi_i64(t1
, d2
& mask
);
3529 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3530 tcg_gen_andi_i64(t1
, t1
, mask
);
3532 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3533 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3534 tcg_temp_free_i64(t1
);
3536 /* Then install the new FPC to set the rounding mode in fpu_status. */
3537 gen_helper_sfpc(cpu_env
, t2
);
3538 tcg_temp_free_i64(t2
);
3542 #ifndef CONFIG_USER_ONLY
3543 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3545 check_privileged(s
);
3546 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3547 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3551 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3553 check_privileged(s
);
3554 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3558 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3560 check_privileged(s
);
3561 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3565 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3567 check_privileged(s
);
3568 /* ??? Surely cpu address != cpu number. In any case the previous
3569 version of this stored more than the required half-word, so it
3570 is unlikely this has ever been tested. */
3571 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3575 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3577 gen_helper_stck(o
->out
, cpu_env
);
3578 /* ??? We don't implement clock states. */
3579 gen_op_movi_cc(s
, 0);
3583 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3585 TCGv_i64 c1
= tcg_temp_new_i64();
3586 TCGv_i64 c2
= tcg_temp_new_i64();
3587 gen_helper_stck(c1
, cpu_env
);
3588 /* Shift the 64-bit value into its place as a zero-extended
3589 104-bit value. Note that "bit positions 64-103 are always
3590 non-zero so that they compare differently to STCK"; we set
3591 the least significant bit to 1. */
3592 tcg_gen_shli_i64(c2
, c1
, 56);
3593 tcg_gen_shri_i64(c1
, c1
, 8);
3594 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3595 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3596 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3597 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3598 tcg_temp_free_i64(c1
);
3599 tcg_temp_free_i64(c2
);
3600 /* ??? We don't implement clock states. */
3601 gen_op_movi_cc(s
, 0);
3605 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3607 check_privileged(s
);
3608 gen_helper_sckc(cpu_env
, o
->in2
);
3612 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3614 check_privileged(s
);
3615 gen_helper_stckc(o
->out
, cpu_env
);
3619 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3621 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3622 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3623 check_privileged(s
);
3624 potential_page_fault(s
);
3625 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3626 tcg_temp_free_i32(r1
);
3627 tcg_temp_free_i32(r3
);
3631 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3633 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3634 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3635 check_privileged(s
);
3636 potential_page_fault(s
);
3637 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3638 tcg_temp_free_i32(r1
);
3639 tcg_temp_free_i32(r3
);
3643 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3645 TCGv_i64 t1
= tcg_temp_new_i64();
3647 check_privileged(s
);
3648 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3649 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3650 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3651 tcg_temp_free_i64(t1
);
3656 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3658 check_privileged(s
);
3659 gen_helper_spt(cpu_env
, o
->in2
);
3663 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3665 check_privileged(s
);
3666 gen_helper_stfl(cpu_env
);
3670 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3672 check_privileged(s
);
3673 gen_helper_stpt(o
->out
, cpu_env
);
3677 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3679 check_privileged(s
);
3680 potential_page_fault(s
);
3681 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3686 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3688 check_privileged(s
);
3689 gen_helper_spx(cpu_env
, o
->in2
);
3693 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3695 check_privileged(s
);
3696 potential_page_fault(s
);
3697 gen_helper_xsch(cpu_env
, regs
[1]);
3702 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3704 check_privileged(s
);
3705 potential_page_fault(s
);
3706 gen_helper_csch(cpu_env
, regs
[1]);
3711 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3713 check_privileged(s
);
3714 potential_page_fault(s
);
3715 gen_helper_hsch(cpu_env
, regs
[1]);
3720 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3722 check_privileged(s
);
3723 potential_page_fault(s
);
3724 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3729 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3731 check_privileged(s
);
3732 potential_page_fault(s
);
3733 gen_helper_rchp(cpu_env
, regs
[1]);
3738 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3740 check_privileged(s
);
3741 potential_page_fault(s
);
3742 gen_helper_rsch(cpu_env
, regs
[1]);
3747 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3749 check_privileged(s
);
3750 potential_page_fault(s
);
3751 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
3756 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
3758 check_privileged(s
);
3759 potential_page_fault(s
);
3760 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
3765 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
3767 check_privileged(s
);
3768 potential_page_fault(s
);
3769 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
3774 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
3776 check_privileged(s
);
3777 potential_page_fault(s
);
3778 gen_helper_chsc(cpu_env
, o
->in2
);
3783 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3785 check_privileged(s
);
3786 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3787 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3791 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3793 uint64_t i2
= get_field(s
->fields
, i2
);
3796 check_privileged(s
);
3798 /* It is important to do what the instruction name says: STORE THEN.
3799 If we let the output hook perform the store then if we fault and
3800 restart, we'll have the wrong SYSTEM MASK in place. */
3801 t
= tcg_temp_new_i64();
3802 tcg_gen_shri_i64(t
, psw_mask
, 56);
3803 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3804 tcg_temp_free_i64(t
);
3806 if (s
->fields
->op
== 0xac) {
3807 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3808 (i2
<< 56) | 0x00ffffffffffffffull
);
3810 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3815 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3817 check_privileged(s
);
3818 potential_page_fault(s
);
3819 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3823 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3825 check_privileged(s
);
3826 potential_page_fault(s
);
3827 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3832 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
3834 potential_page_fault(s
);
3835 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
3840 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3842 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3846 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3848 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3852 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3854 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3858 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3860 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3864 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3866 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3867 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3868 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3869 tcg_temp_free_i32(r1
);
3870 tcg_temp_free_i32(r3
);
3874 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3876 int m3
= get_field(s
->fields
, m3
);
3877 int pos
, base
= s
->insn
->data
;
3878 TCGv_i64 tmp
= tcg_temp_new_i64();
3880 pos
= base
+ ctz32(m3
) * 8;
3883 /* Effectively a 32-bit store. */
3884 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3885 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3891 /* Effectively a 16-bit store. */
3892 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3893 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3900 /* Effectively an 8-bit store. */
3901 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3902 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3906 /* This is going to be a sequence of shifts and stores. */
3907 pos
= base
+ 32 - 8;
3910 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3911 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3912 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3914 m3
= (m3
<< 1) & 0xf;
3919 tcg_temp_free_i64(tmp
);
3923 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3925 int r1
= get_field(s
->fields
, r1
);
3926 int r3
= get_field(s
->fields
, r3
);
3927 int size
= s
->insn
->data
;
3928 TCGv_i64 tsize
= tcg_const_i64(size
);
3932 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3934 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3939 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3943 tcg_temp_free_i64(tsize
);
3947 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3949 int r1
= get_field(s
->fields
, r1
);
3950 int r3
= get_field(s
->fields
, r3
);
3951 TCGv_i64 t
= tcg_temp_new_i64();
3952 TCGv_i64 t4
= tcg_const_i64(4);
3953 TCGv_i64 t32
= tcg_const_i64(32);
3956 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3957 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3961 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3965 tcg_temp_free_i64(t
);
3966 tcg_temp_free_i64(t4
);
3967 tcg_temp_free_i64(t32
);
3971 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3973 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3975 return_low128(o
->in2
);
3979 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3981 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3985 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3990 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3992 /* The !borrow flag is the msb of CC. Since we want the inverse of
3993 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3994 disas_jcc(s
, &cmp
, 8 | 4);
3995 borrow
= tcg_temp_new_i64();
3997 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3999 TCGv_i32 t
= tcg_temp_new_i32();
4000 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4001 tcg_gen_extu_i32_i64(borrow
, t
);
4002 tcg_temp_free_i32(t
);
4006 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4007 tcg_temp_free_i64(borrow
);
4011 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4018 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4019 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4020 tcg_temp_free_i32(t
);
4022 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
4023 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4024 tcg_temp_free_i32(t
);
4026 gen_exception(EXCP_SVC
);
4027 return EXIT_NORETURN
;
4030 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4032 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4037 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4039 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4044 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4046 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4051 #ifndef CONFIG_USER_ONLY
4053 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4055 check_privileged(s
);
4056 potential_page_fault(s
);
4057 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4062 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4064 potential_page_fault(s
);
4065 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4072 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4074 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4075 potential_page_fault(s
);
4076 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4077 tcg_temp_free_i32(l
);
4082 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4084 potential_page_fault(s
);
4085 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4086 return_low128(o
->out2
);
4091 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4093 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4094 potential_page_fault(s
);
4095 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4096 tcg_temp_free_i32(l
);
4101 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4103 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4104 potential_page_fault(s
);
4105 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4106 tcg_temp_free_i32(l
);
4110 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4112 int d1
= get_field(s
->fields
, d1
);
4113 int d2
= get_field(s
->fields
, d2
);
4114 int b1
= get_field(s
->fields
, b1
);
4115 int b2
= get_field(s
->fields
, b2
);
4116 int l
= get_field(s
->fields
, l1
);
4119 o
->addr1
= get_address(s
, 0, b1
, d1
);
4121 /* If the addresses are identical, this is a store/memset of zero. */
4122 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4123 o
->in2
= tcg_const_i64(0);
4127 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4130 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4134 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4137 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4141 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4144 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4148 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4150 gen_op_movi_cc(s
, 0);
4154 /* But in general we'll defer to a helper. */
4155 o
->in2
= get_address(s
, 0, b2
, d2
);
4156 t32
= tcg_const_i32(l
);
4157 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4158 tcg_temp_free_i32(t32
);
4163 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4165 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4169 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4171 int shift
= s
->insn
->data
& 0xff;
4172 int size
= s
->insn
->data
>> 8;
4173 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4176 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4177 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4179 /* Produce the CC from only the bits manipulated. */
4180 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4181 set_cc_nz_u64(s
, cc_dst
);
4185 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4187 o
->out
= tcg_const_i64(0);
4191 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4193 o
->out
= tcg_const_i64(0);
4199 /* ====================================================================== */
4200 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4201 the original inputs), update the various cc data structures in order to
4202 be able to compute the new condition code. */
4204 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4206 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4209 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4211 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4214 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4216 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4219 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4221 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4224 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4226 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4229 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4231 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4234 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4236 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4239 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4241 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4244 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4246 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4249 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4251 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4254 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4256 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4259 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4261 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4264 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4266 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4269 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4271 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4274 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4276 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4279 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4281 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4284 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4286 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4289 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4291 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4294 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4296 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4299 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4301 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4302 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4305 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4307 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4310 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4312 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4315 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4317 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4320 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4322 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4325 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4327 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4330 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4332 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4335 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4337 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4340 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4342 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4345 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4347 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4350 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4352 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4355 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4357 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4360 /* ====================================================================== */
4361 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4362 with the TCG register to which we will write. Used in combination with
4363 the "wout" generators, in some cases we need a new temporary, and in
4364 some cases we can write to a TCG global. */
4366 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4368 o
->out
= tcg_temp_new_i64();
4370 #define SPEC_prep_new 0
4372 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4374 o
->out
= tcg_temp_new_i64();
4375 o
->out2
= tcg_temp_new_i64();
4377 #define SPEC_prep_new_P 0
4379 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4381 o
->out
= regs
[get_field(f
, r1
)];
4384 #define SPEC_prep_r1 0
4386 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4388 int r1
= get_field(f
, r1
);
4390 o
->out2
= regs
[r1
+ 1];
4391 o
->g_out
= o
->g_out2
= true;
4393 #define SPEC_prep_r1_P SPEC_r1_even
4395 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4397 o
->out
= fregs
[get_field(f
, r1
)];
4400 #define SPEC_prep_f1 0
4402 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4404 int r1
= get_field(f
, r1
);
4406 o
->out2
= fregs
[r1
+ 2];
4407 o
->g_out
= o
->g_out2
= true;
4409 #define SPEC_prep_x1 SPEC_r1_f128
4411 /* ====================================================================== */
4412 /* The "Write OUTput" generators. These generally perform some non-trivial
4413 copy of data to TCG globals, or to main memory. The trivial cases are
4414 generally handled by having a "prep" generator install the TCG global
4415 as the destination of the operation. */
4417 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4419 store_reg(get_field(f
, r1
), o
->out
);
4421 #define SPEC_wout_r1 0
4423 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4425 int r1
= get_field(f
, r1
);
4426 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4428 #define SPEC_wout_r1_8 0
4430 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4432 int r1
= get_field(f
, r1
);
4433 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4435 #define SPEC_wout_r1_16 0
4437 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4439 store_reg32_i64(get_field(f
, r1
), o
->out
);
4441 #define SPEC_wout_r1_32 0
4443 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4445 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4447 #define SPEC_wout_r1_32h 0
4449 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4451 int r1
= get_field(f
, r1
);
4452 store_reg32_i64(r1
, o
->out
);
4453 store_reg32_i64(r1
+ 1, o
->out2
);
4455 #define SPEC_wout_r1_P32 SPEC_r1_even
4457 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4459 int r1
= get_field(f
, r1
);
4460 store_reg32_i64(r1
+ 1, o
->out
);
4461 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4462 store_reg32_i64(r1
, o
->out
);
4464 #define SPEC_wout_r1_D32 SPEC_r1_even
4466 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4468 int r3
= get_field(f
, r3
);
4469 store_reg32_i64(r3
, o
->out
);
4470 store_reg32_i64(r3
+ 1, o
->out2
);
4472 #define SPEC_wout_r3_P32 SPEC_r3_even
4474 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4476 int r3
= get_field(f
, r3
);
4477 store_reg(r3
, o
->out
);
4478 store_reg(r3
+ 1, o
->out2
);
4480 #define SPEC_wout_r3_P64 SPEC_r3_even
4482 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4484 store_freg32_i64(get_field(f
, r1
), o
->out
);
4486 #define SPEC_wout_e1 0
4488 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4490 store_freg(get_field(f
, r1
), o
->out
);
4492 #define SPEC_wout_f1 0
4494 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4496 int f1
= get_field(s
->fields
, r1
);
4497 store_freg(f1
, o
->out
);
4498 store_freg(f1
+ 2, o
->out2
);
4500 #define SPEC_wout_x1 SPEC_r1_f128
4502 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4504 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4505 store_reg32_i64(get_field(f
, r1
), o
->out
);
4508 #define SPEC_wout_cond_r1r2_32 0
4510 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4512 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4513 store_freg32_i64(get_field(f
, r1
), o
->out
);
4516 #define SPEC_wout_cond_e1e2 0
4518 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4520 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4522 #define SPEC_wout_m1_8 0
4524 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4526 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4528 #define SPEC_wout_m1_16 0
4530 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4532 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4534 #define SPEC_wout_m1_32 0
4536 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4538 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4540 #define SPEC_wout_m1_64 0
4542 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4544 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4546 #define SPEC_wout_m2_32 0
4548 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4550 store_reg(get_field(f
, r1
), o
->in2
);
4552 #define SPEC_wout_in2_r1 0
4554 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4556 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4558 #define SPEC_wout_in2_r1_32 0
4560 /* ====================================================================== */
4561 /* The "INput 1" generators. These load the first operand to an insn. */
4563 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4565 o
->in1
= load_reg(get_field(f
, r1
));
4567 #define SPEC_in1_r1 0
4569 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4571 o
->in1
= regs
[get_field(f
, r1
)];
4574 #define SPEC_in1_r1_o 0
4576 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4578 o
->in1
= tcg_temp_new_i64();
4579 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4581 #define SPEC_in1_r1_32s 0
4583 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4585 o
->in1
= tcg_temp_new_i64();
4586 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4588 #define SPEC_in1_r1_32u 0
4590 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4592 o
->in1
= tcg_temp_new_i64();
4593 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4595 #define SPEC_in1_r1_sr32 0
4597 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4599 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4601 #define SPEC_in1_r1p1 SPEC_r1_even
4603 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4605 o
->in1
= tcg_temp_new_i64();
4606 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4608 #define SPEC_in1_r1p1_32s SPEC_r1_even
4610 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4612 o
->in1
= tcg_temp_new_i64();
4613 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4615 #define SPEC_in1_r1p1_32u SPEC_r1_even
4617 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4619 int r1
= get_field(f
, r1
);
4620 o
->in1
= tcg_temp_new_i64();
4621 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4623 #define SPEC_in1_r1_D32 SPEC_r1_even
4625 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4627 o
->in1
= load_reg(get_field(f
, r2
));
4629 #define SPEC_in1_r2 0
4631 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4633 o
->in1
= tcg_temp_new_i64();
4634 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4636 #define SPEC_in1_r2_sr32 0
4638 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4640 o
->in1
= load_reg(get_field(f
, r3
));
4642 #define SPEC_in1_r3 0
4644 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4646 o
->in1
= regs
[get_field(f
, r3
)];
4649 #define SPEC_in1_r3_o 0
4651 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4653 o
->in1
= tcg_temp_new_i64();
4654 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4656 #define SPEC_in1_r3_32s 0
4658 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4660 o
->in1
= tcg_temp_new_i64();
4661 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4663 #define SPEC_in1_r3_32u 0
4665 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4667 int r3
= get_field(f
, r3
);
4668 o
->in1
= tcg_temp_new_i64();
4669 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4671 #define SPEC_in1_r3_D32 SPEC_r3_even
4673 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4675 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4677 #define SPEC_in1_e1 0
4679 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4681 o
->in1
= fregs
[get_field(f
, r1
)];
4684 #define SPEC_in1_f1_o 0
4686 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4688 int r1
= get_field(f
, r1
);
4690 o
->out2
= fregs
[r1
+ 2];
4691 o
->g_out
= o
->g_out2
= true;
4693 #define SPEC_in1_x1_o SPEC_r1_f128
4695 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4697 o
->in1
= fregs
[get_field(f
, r3
)];
4700 #define SPEC_in1_f3_o 0
4702 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4704 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4706 #define SPEC_in1_la1 0
4708 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4710 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4711 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4713 #define SPEC_in1_la2 0
4715 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4718 o
->in1
= tcg_temp_new_i64();
4719 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4721 #define SPEC_in1_m1_8u 0
4723 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4726 o
->in1
= tcg_temp_new_i64();
4727 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4729 #define SPEC_in1_m1_16s 0
4731 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4734 o
->in1
= tcg_temp_new_i64();
4735 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4737 #define SPEC_in1_m1_16u 0
4739 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4742 o
->in1
= tcg_temp_new_i64();
4743 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4745 #define SPEC_in1_m1_32s 0
4747 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4750 o
->in1
= tcg_temp_new_i64();
4751 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4753 #define SPEC_in1_m1_32u 0
4755 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4758 o
->in1
= tcg_temp_new_i64();
4759 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4761 #define SPEC_in1_m1_64 0
4763 /* ====================================================================== */
4764 /* The "INput 2" generators. These load the second operand to an insn. */
4766 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4768 o
->in2
= regs
[get_field(f
, r1
)];
4771 #define SPEC_in2_r1_o 0
4773 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4775 o
->in2
= tcg_temp_new_i64();
4776 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4778 #define SPEC_in2_r1_16u 0
4780 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4782 o
->in2
= tcg_temp_new_i64();
4783 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4785 #define SPEC_in2_r1_32u 0
4787 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4789 int r1
= get_field(f
, r1
);
4790 o
->in2
= tcg_temp_new_i64();
4791 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4793 #define SPEC_in2_r1_D32 SPEC_r1_even
4795 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4797 o
->in2
= load_reg(get_field(f
, r2
));
4799 #define SPEC_in2_r2 0
4801 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4803 o
->in2
= regs
[get_field(f
, r2
)];
4806 #define SPEC_in2_r2_o 0
4808 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4810 int r2
= get_field(f
, r2
);
4812 o
->in2
= load_reg(r2
);
4815 #define SPEC_in2_r2_nz 0
4817 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4819 o
->in2
= tcg_temp_new_i64();
4820 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4822 #define SPEC_in2_r2_8s 0
4824 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4826 o
->in2
= tcg_temp_new_i64();
4827 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4829 #define SPEC_in2_r2_8u 0
4831 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4833 o
->in2
= tcg_temp_new_i64();
4834 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4836 #define SPEC_in2_r2_16s 0
4838 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4840 o
->in2
= tcg_temp_new_i64();
4841 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4843 #define SPEC_in2_r2_16u 0
4845 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4847 o
->in2
= load_reg(get_field(f
, r3
));
4849 #define SPEC_in2_r3 0
4851 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4853 o
->in2
= tcg_temp_new_i64();
4854 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
4856 #define SPEC_in2_r3_sr32 0
4858 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4860 o
->in2
= tcg_temp_new_i64();
4861 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4863 #define SPEC_in2_r2_32s 0
4865 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4867 o
->in2
= tcg_temp_new_i64();
4868 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4870 #define SPEC_in2_r2_32u 0
4872 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4874 o
->in2
= tcg_temp_new_i64();
4875 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
4877 #define SPEC_in2_r2_sr32 0
4879 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4881 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4883 #define SPEC_in2_e2 0
4885 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4887 o
->in2
= fregs
[get_field(f
, r2
)];
4890 #define SPEC_in2_f2_o 0
4892 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4894 int r2
= get_field(f
, r2
);
4896 o
->in2
= fregs
[r2
+ 2];
4897 o
->g_in1
= o
->g_in2
= true;
4899 #define SPEC_in2_x2_o SPEC_r2_f128
4901 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4903 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4905 #define SPEC_in2_ra2 0
4907 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4909 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4910 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4912 #define SPEC_in2_a2 0
4914 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4916 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4918 #define SPEC_in2_ri2 0
4920 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4922 help_l2_shift(s
, f
, o
, 31);
4924 #define SPEC_in2_sh32 0
4926 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4928 help_l2_shift(s
, f
, o
, 63);
4930 #define SPEC_in2_sh64 0
4932 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4935 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4937 #define SPEC_in2_m2_8u 0
4939 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4942 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4944 #define SPEC_in2_m2_16s 0
4946 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4949 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4951 #define SPEC_in2_m2_16u 0
4953 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4956 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4958 #define SPEC_in2_m2_32s 0
4960 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4963 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4965 #define SPEC_in2_m2_32u 0
4967 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4970 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4972 #define SPEC_in2_m2_64 0
4974 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4977 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4979 #define SPEC_in2_mri2_16u 0
4981 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4984 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4986 #define SPEC_in2_mri2_32s 0
4988 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4991 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4993 #define SPEC_in2_mri2_32u 0
4995 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4998 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5000 #define SPEC_in2_mri2_64 0
5002 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5004 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5006 #define SPEC_in2_i2 0
5008 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5010 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5012 #define SPEC_in2_i2_8u 0
5014 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5016 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5018 #define SPEC_in2_i2_16u 0
5020 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5022 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5024 #define SPEC_in2_i2_32u 0
5026 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5028 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5029 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5031 #define SPEC_in2_i2_16u_shl 0
5033 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5035 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5036 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5038 #define SPEC_in2_i2_32u_shl 0
5040 #ifndef CONFIG_USER_ONLY
5041 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5043 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5045 #define SPEC_in2_insn 0
5048 /* ====================================================================== */
5050 /* Find opc within the table of insns. This is formulated as a switch
5051 statement so that (1) we get compile-time notice of cut-paste errors
5052 for duplicated opcodes, and (2) the compiler generates the binary
5053 search tree, rather than us having to post-process the table. */
5055 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5056 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5058 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5060 enum DisasInsnEnum
{
5061 #include "insn-data.def"
5065 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5069 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5071 .help_in1 = in1_##I1, \
5072 .help_in2 = in2_##I2, \
5073 .help_prep = prep_##P, \
5074 .help_wout = wout_##W, \
5075 .help_cout = cout_##CC, \
5076 .help_op = op_##OP, \
5080 /* Allow 0 to be used for NULL in the table below. */
5088 #define SPEC_in1_0 0
5089 #define SPEC_in2_0 0
5090 #define SPEC_prep_0 0
5091 #define SPEC_wout_0 0
5093 static const DisasInsn insn_info
[] = {
5094 #include "insn-data.def"
5098 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5099 case OPC: return &insn_info[insn_ ## NM];
5101 static const DisasInsn
*lookup_opc(uint16_t opc
)
5104 #include "insn-data.def"
5113 /* Extract a field from the insn. The INSN should be left-aligned in
5114 the uint64_t so that we can more easily utilize the big-bit-endian
5115 definitions we extract from the Principals of Operation. */
5117 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5125 /* Zero extract the field from the insn. */
5126 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5128 /* Sign-extend, or un-swap the field as necessary. */
5130 case 0: /* unsigned */
5132 case 1: /* signed */
5133 assert(f
->size
<= 32);
5134 m
= 1u << (f
->size
- 1);
5137 case 2: /* dl+dh split, signed 20 bit. */
5138 r
= ((int8_t)r
<< 12) | (r
>> 8);
5144 /* Validate that the "compressed" encoding we selected above is valid.
5145 I.e. we havn't make two different original fields overlap. */
5146 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5147 o
->presentC
|= 1 << f
->indexC
;
5148 o
->presentO
|= 1 << f
->indexO
;
5150 o
->c
[f
->indexC
] = r
;
5153 /* Lookup the insn at the current PC, extracting the operands into O and
5154 returning the info struct for the insn. Returns NULL for invalid insn. */
5156 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5159 uint64_t insn
, pc
= s
->pc
;
5161 const DisasInsn
*info
;
5163 insn
= ld_code2(env
, pc
);
5164 op
= (insn
>> 8) & 0xff;
5165 ilen
= get_ilen(op
);
5166 s
->next_pc
= s
->pc
+ ilen
;
5173 insn
= ld_code4(env
, pc
) << 32;
5176 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5182 /* We can't actually determine the insn format until we've looked up
5183 the full insn opcode. Which we can't do without locating the
5184 secondary opcode. Assume by default that OP2 is at bit 40; for
5185 those smaller insns that don't actually have a secondary opcode
5186 this will correctly result in OP2 = 0. */
5192 case 0xb2: /* S, RRF, RRE */
5193 case 0xb3: /* RRE, RRD, RRF */
5194 case 0xb9: /* RRE, RRF */
5195 case 0xe5: /* SSE, SIL */
5196 op2
= (insn
<< 8) >> 56;
5200 case 0xc0: /* RIL */
5201 case 0xc2: /* RIL */
5202 case 0xc4: /* RIL */
5203 case 0xc6: /* RIL */
5204 case 0xc8: /* SSF */
5205 case 0xcc: /* RIL */
5206 op2
= (insn
<< 12) >> 60;
5208 case 0xd0 ... 0xdf: /* SS */
5214 case 0xee ... 0xf3: /* SS */
5215 case 0xf8 ... 0xfd: /* SS */
5219 op2
= (insn
<< 40) >> 56;
5223 memset(f
, 0, sizeof(*f
));
5228 /* Lookup the instruction. */
5229 info
= lookup_opc(op
<< 8 | op2
);
5231 /* If we found it, extract the operands. */
5233 DisasFormat fmt
= info
->fmt
;
5236 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5237 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5243 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5245 const DisasInsn
*insn
;
5246 ExitStatus ret
= NO_EXIT
;
5250 /* Search for the insn in the table. */
5251 insn
= extract_insn(env
, s
, &f
);
5253 /* Not found means unimplemented/illegal opcode. */
5255 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5257 gen_illegal_opcode(s
);
5258 return EXIT_NORETURN
;
5261 #ifndef CONFIG_USER_ONLY
5262 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5263 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5264 gen_helper_per_ifetch(cpu_env
, addr
);
5265 tcg_temp_free_i64(addr
);
5269 /* Check for insn specification exceptions. */
5271 int spec
= insn
->spec
, excp
= 0, r
;
5273 if (spec
& SPEC_r1_even
) {
5274 r
= get_field(&f
, r1
);
5276 excp
= PGM_SPECIFICATION
;
5279 if (spec
& SPEC_r2_even
) {
5280 r
= get_field(&f
, r2
);
5282 excp
= PGM_SPECIFICATION
;
5285 if (spec
& SPEC_r3_even
) {
5286 r
= get_field(&f
, r3
);
5288 excp
= PGM_SPECIFICATION
;
5291 if (spec
& SPEC_r1_f128
) {
5292 r
= get_field(&f
, r1
);
5294 excp
= PGM_SPECIFICATION
;
5297 if (spec
& SPEC_r2_f128
) {
5298 r
= get_field(&f
, r2
);
5300 excp
= PGM_SPECIFICATION
;
5304 gen_program_exception(s
, excp
);
5305 return EXIT_NORETURN
;
5309 /* Set up the strutures we use to communicate with the helpers. */
5312 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5313 TCGV_UNUSED_I64(o
.out
);
5314 TCGV_UNUSED_I64(o
.out2
);
5315 TCGV_UNUSED_I64(o
.in1
);
5316 TCGV_UNUSED_I64(o
.in2
);
5317 TCGV_UNUSED_I64(o
.addr1
);
5319 /* Implement the instruction. */
5320 if (insn
->help_in1
) {
5321 insn
->help_in1(s
, &f
, &o
);
5323 if (insn
->help_in2
) {
5324 insn
->help_in2(s
, &f
, &o
);
5326 if (insn
->help_prep
) {
5327 insn
->help_prep(s
, &f
, &o
);
5329 if (insn
->help_op
) {
5330 ret
= insn
->help_op(s
, &o
);
5332 if (insn
->help_wout
) {
5333 insn
->help_wout(s
, &f
, &o
);
5335 if (insn
->help_cout
) {
5336 insn
->help_cout(s
, &o
);
5339 /* Free any temporaries created by the helpers. */
5340 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5341 tcg_temp_free_i64(o
.out
);
5343 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5344 tcg_temp_free_i64(o
.out2
);
5346 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5347 tcg_temp_free_i64(o
.in1
);
5349 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5350 tcg_temp_free_i64(o
.in2
);
5352 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5353 tcg_temp_free_i64(o
.addr1
);
5356 #ifndef CONFIG_USER_ONLY
5357 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5358 /* An exception might be triggered, save PSW if not already done. */
5359 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5360 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5366 /* Call the helper to check for a possible PER exception. */
5367 gen_helper_per_check_exception(cpu_env
);
5371 /* Advance to the next instruction. */
5376 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5378 S390CPU
*cpu
= s390_env_get_cpu(env
);
5379 CPUState
*cs
= CPU(cpu
);
5381 target_ulong pc_start
;
5382 uint64_t next_page_start
;
5383 int num_insns
, max_insns
;
5390 if (!(tb
->flags
& FLAG_MASK_64
)) {
5391 pc_start
&= 0x7fffffff;
5396 dc
.cc_op
= CC_OP_DYNAMIC
;
5397 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5399 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5402 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5403 if (max_insns
== 0) {
5404 max_insns
= CF_COUNT_MASK
;
5406 if (max_insns
> TCG_MAX_INSNS
) {
5407 max_insns
= TCG_MAX_INSNS
;
5413 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5416 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5417 status
= EXIT_PC_STALE
;
5419 /* The address covered by the breakpoint must be included in
5420 [tb->pc, tb->pc + tb->size) in order to for it to be
5421 properly cleared -- thus we increment the PC here so that
5422 the logic setting tb->size below does the right thing. */
5427 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5432 if (status
== NO_EXIT
) {
5433 status
= translate_one(env
, &dc
);
5436 /* If we reach a page boundary, are single stepping,
5437 or exhaust instruction count, stop generation. */
5438 if (status
== NO_EXIT
5439 && (dc
.pc
>= next_page_start
5440 || tcg_op_buf_full()
5441 || num_insns
>= max_insns
5443 || cs
->singlestep_enabled
)) {
5444 status
= EXIT_PC_STALE
;
5446 } while (status
== NO_EXIT
);
5448 if (tb
->cflags
& CF_LAST_IO
) {
5457 update_psw_addr(&dc
);
5459 case EXIT_PC_UPDATED
:
5460 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5461 cc op type is in env */
5463 /* Exit the TB, either by raising a debug exception or by return. */
5465 gen_exception(EXCP_DEBUG
);
5466 } else if (use_exit_tb(&dc
)) {
5469 tcg_gen_lookup_and_goto_ptr(psw_addr
);
5476 gen_tb_end(tb
, num_insns
);
5478 tb
->size
= dc
.pc
- pc_start
;
5479 tb
->icount
= num_insns
;
5481 #if defined(S390X_DEBUG_DISAS)
5482 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5483 && qemu_log_in_addr_range(pc_start
)) {
5485 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5486 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5493 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5496 int cc_op
= data
[1];
5497 env
->psw
.addr
= data
[0];
5498 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {