4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
62 bool singlestep_enabled
;
65 /* Information carried about a condition to be evaluated. */
72 struct { TCGv_i64 a
, b
; } s64
;
73 struct { TCGv_i32 a
, b
; } s32
;
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
97 S390CPU
*cpu
= S390_CPU(cs
);
98 CPUS390XState
*env
= &cpu
->env
;
101 if (env
->cc_op
> 3) {
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
103 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
105 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
106 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
109 for (i
= 0; i
< 16; i
++) {
110 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
112 cpu_fprintf(f
, "\n");
118 for (i
= 0; i
< 16; i
++) {
119 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
121 cpu_fprintf(f
, "\n");
127 for (i
= 0; i
< 32; i
++) {
128 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
129 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
130 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i
= 0; i
< 16; i
++) {
135 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
137 cpu_fprintf(f
, "\n");
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i
= 0; i
< CC_OP_MAX
; i
++) {
146 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
147 inline_branch_miss
[i
], inline_branch_hit
[i
]);
151 cpu_fprintf(f
, "\n");
154 static TCGv_i64 psw_addr
;
155 static TCGv_i64 psw_mask
;
156 static TCGv_i64 gbea
;
158 static TCGv_i32 cc_op
;
159 static TCGv_i64 cc_src
;
160 static TCGv_i64 cc_dst
;
161 static TCGv_i64 cc_vr
;
163 static char cpu_reg_names
[32][4];
164 static TCGv_i64 regs
[16];
165 static TCGv_i64 fregs
[16];
167 void s390x_translate_init(void)
171 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
172 tcg_ctx
.tcg_env
= cpu_env
;
173 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
174 offsetof(CPUS390XState
, psw
.addr
),
176 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
177 offsetof(CPUS390XState
, psw
.mask
),
179 gbea
= tcg_global_mem_new_i64(cpu_env
,
180 offsetof(CPUS390XState
, gbea
),
183 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
185 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
187 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
189 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
192 for (i
= 0; i
< 16; i
++) {
193 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
194 regs
[i
] = tcg_global_mem_new(cpu_env
,
195 offsetof(CPUS390XState
, regs
[i
]),
199 for (i
= 0; i
< 16; i
++) {
200 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
201 fregs
[i
] = tcg_global_mem_new(cpu_env
,
202 offsetof(CPUS390XState
, vregs
[i
][0].d
),
203 cpu_reg_names
[i
+ 16]);
/* Return a fresh 64-bit temporary holding a copy of general register REG,
   so callers may modify it without clobbering the global register TCG var. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}
/* Return a fresh temporary with the short (32-bit) FP register REG in its
   low 32 bits.  The short format occupies the high half of the 64-bit FP
   register, hence the shift right by 32. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}
/* Store all 64 bits of V into general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
/* Store all 64 bits of V into FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
/* Store the low 32 bits of V into general register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
/* Store the low 32 bits of V into the HIGH half of general register REG
   (high-word facility style write); the low half is preserved. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
/* Store the low 32 bits of V as the short (32-bit) FP register REG,
   i.e. into the high half of the 64-bit FP register; low half preserved. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
/* Copy the low half of a 128-bit helper result, which helpers leave in
   env->retxl, into DEST. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
/* Synchronize the architectural PSW address with the translation-time PC.
   psw.addr is not updated per-instruction during translation. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
258 static void per_branch(DisasContext
*s
, bool to_next
)
260 #ifndef CONFIG_USER_ONLY
261 tcg_gen_movi_i64(gbea
, s
->pc
);
263 if (s
->tb
->flags
& FLAG_MASK_PER
) {
264 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
265 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
267 tcg_temp_free_i64(next_pc
);
273 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
274 TCGv_i64 arg1
, TCGv_i64 arg2
)
276 #ifndef CONFIG_USER_ONLY
277 if (s
->tb
->flags
& FLAG_MASK_PER
) {
278 TCGLabel
*lab
= gen_new_label();
279 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
281 tcg_gen_movi_i64(gbea
, s
->pc
);
282 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
286 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
287 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
288 tcg_temp_free_i64(pc
);
/* Record a PER breaking event: latch the current instruction address into
   the breaking-event-address register (gbea). */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}
298 static void update_cc_op(DisasContext
*s
)
300 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
301 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
305 static void potential_page_fault(DisasContext
*s
)
/* Fetch two bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
/* Fetch four bytes of instruction text at PC, zero-extended to 64 bits.
   The intermediate uint32_t cast guards against sign-extension of the
   (signed) cpu_ldl_code result. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
321 static int get_mem_index(DisasContext
*s
)
323 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
324 case PSW_ASC_PRIMARY
>> 32:
326 case PSW_ASC_SECONDARY
>> 32:
328 case PSW_ASC_HOME
>> 32:
336 static void gen_exception(int excp
)
338 TCGv_i32 tmp
= tcg_const_i32(excp
);
339 gen_helper_exception(cpu_env
, tmp
);
340 tcg_temp_free_i32(tmp
);
343 static void gen_program_exception(DisasContext
*s
, int code
)
/* Remember what pgm exception this was.  */
348 tmp
= tcg_const_i32(code
);
349 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
350 tcg_temp_free_i32(tmp
);
352 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
353 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
354 tcg_temp_free_i32(tmp
);
356 /* Advance past instruction. */
363 /* Trigger exception. */
364 gen_exception(EXCP_PGM
);
/* Raise an operation exception for an illegal or unavailable opcode. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
/* Raise a data exception with DXC 0xff.  The OR with 0xff00 sets the
   second byte of the FPC; NOTE(review): assumes the DXC field is FPC
   bits 8-15 — confirm against the architecture definition of the FPC. */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}
386 #ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception if the TB was translated in
   problem state (PSW problem-state bit cached in tb->flags). */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
395 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
397 TCGv_i64 tmp
= tcg_temp_new_i64();
398 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
400 /* Note that d2 is limited to 20 bits, signed. If we crop negative
401 displacements early we create larger immedate addends. */
403 /* Note that addi optimizes the imm==0 case. */
405 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
406 tcg_gen_addi_i64(tmp
, tmp
, d2
);
408 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
410 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
416 tcg_gen_movi_i64(tmp
, d2
);
419 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
425 static inline bool live_cc_data(DisasContext
*s
)
427 return (s
->cc_op
!= CC_OP_DYNAMIC
428 && s
->cc_op
!= CC_OP_STATIC
/* Set the condition code to the compile-time constant VAL (0..3).
   Any live cc computation inputs are discarded first so TCG need not
   keep them alive. */
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
442 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
444 if (live_cc_data(s
)) {
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_discard_i64(cc_vr
);
448 tcg_gen_mov_i64(cc_dst
, dst
);
452 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
455 if (live_cc_data(s
)) {
456 tcg_gen_discard_i64(cc_vr
);
458 tcg_gen_mov_i64(cc_src
, src
);
459 tcg_gen_mov_i64(cc_dst
, dst
);
463 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
464 TCGv_i64 dst
, TCGv_i64 vr
)
466 tcg_gen_mov_i64(cc_src
, src
);
467 tcg_gen_mov_i64(cc_dst
, dst
);
468 tcg_gen_mov_i64(cc_vr
, vr
);
/* Set the condition code from the zero/nonzero state of the 64-bit VAL. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
/* Set the condition code from a 32-bit float result in VAL. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
/* Set the condition code from a 64-bit float result in VAL. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
/* Set the condition code from a 128-bit float result in VH:VL. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
/* CC value is in env->cc_op */
/* Mark the condition code as already computed and stored in env->cc_op;
   discard any live cc computation inputs. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
503 /* calculates cc into cc_op */
504 static void gen_op_calc_cc(DisasContext
*s
)
506 TCGv_i32 local_cc_op
;
509 TCGV_UNUSED_I32(local_cc_op
);
510 TCGV_UNUSED_I64(dummy
);
513 dummy
= tcg_const_i64(0);
527 local_cc_op
= tcg_const_i32(s
->cc_op
);
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
547 /* env->cc_op already is the cc value */
562 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
567 case CC_OP_LTUGTU_32
:
568 case CC_OP_LTUGTU_64
:
575 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
590 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
593 /* unknown operation - assume 3 arguments and cc_op in env */
594 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
600 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
601 tcg_temp_free_i32(local_cc_op
);
603 if (!TCGV_IS_UNUSED_I64(dummy
)) {
604 tcg_temp_free_i64(dummy
);
607 /* We now have cc in cc_op as constant */
/* True when we must leave the TB via exit_tb rather than chaining:
   single-stepping, a TB whose last insn does I/O, or PER tracing active. */
static bool use_exit_tb(DisasContext *s)
{
    return (s->singlestep_enabled ||
            (s->tb->cflags & CF_LAST_IO) ||
            (s->tb->flags & FLAG_MASK_PER));
}
618 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
620 if (unlikely(use_exit_tb(s
))) {
623 #ifndef CONFIG_USER_ONLY
624 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
625 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
/* Debug statistics: count a conditional branch that could NOT be
   translated inline for cc computation CC_OP.  No-op unless
   DEBUG_INLINE_BRANCHES is defined. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
/* Debug statistics: count a conditional branch translated inline for cc
   computation CC_OP.  No-op unless DEBUG_INLINE_BRANCHES is defined. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Entries are paired because the
   low mask bit (CC=3) is dont-care. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible; the two low mask
   bits (CC=2, CC=3) are dont-care, so entries repeat in runs of four. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
671 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
672 details required to generate a TCG comparison. */
673 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
676 enum cc_op old_cc_op
= s
->cc_op
;
678 if (mask
== 15 || mask
== 0) {
679 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
682 c
->g1
= c
->g2
= true;
687 /* Find the TCG condition for the mask + cc op. */
693 cond
= ltgt_cond
[mask
];
694 if (cond
== TCG_COND_NEVER
) {
697 account_inline_branch(s
, old_cc_op
);
700 case CC_OP_LTUGTU_32
:
701 case CC_OP_LTUGTU_64
:
702 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
703 if (cond
== TCG_COND_NEVER
) {
706 account_inline_branch(s
, old_cc_op
);
710 cond
= nz_cond
[mask
];
711 if (cond
== TCG_COND_NEVER
) {
714 account_inline_branch(s
, old_cc_op
);
729 account_inline_branch(s
, old_cc_op
);
744 account_inline_branch(s
, old_cc_op
);
748 switch (mask
& 0xa) {
749 case 8: /* src == 0 -> no one bit found */
752 case 2: /* src != 0 -> one bit found */
758 account_inline_branch(s
, old_cc_op
);
764 case 8 | 2: /* vr == 0 */
767 case 4 | 1: /* vr != 0 */
770 case 8 | 4: /* no carry -> vr >= src */
773 case 2 | 1: /* carry -> vr < src */
779 account_inline_branch(s
, old_cc_op
);
784 /* Note that CC=0 is impossible; treat it as dont-care. */
786 case 2: /* zero -> op1 == op2 */
789 case 4 | 1: /* !zero -> op1 != op2 */
792 case 4: /* borrow (!carry) -> op1 < op2 */
795 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
801 account_inline_branch(s
, old_cc_op
);
806 /* Calculate cc value. */
811 /* Jump based on CC. We'll load up the real cond below;
812 the assignment here merely avoids a compiler warning. */
813 account_noninline_branch(s
, old_cc_op
);
814 old_cc_op
= CC_OP_STATIC
;
815 cond
= TCG_COND_NEVER
;
819 /* Load up the arguments of the comparison. */
821 c
->g1
= c
->g2
= false;
825 c
->u
.s32
.a
= tcg_temp_new_i32();
826 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
827 c
->u
.s32
.b
= tcg_const_i32(0);
830 case CC_OP_LTUGTU_32
:
833 c
->u
.s32
.a
= tcg_temp_new_i32();
834 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
835 c
->u
.s32
.b
= tcg_temp_new_i32();
836 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
843 c
->u
.s64
.b
= tcg_const_i64(0);
847 case CC_OP_LTUGTU_64
:
851 c
->g1
= c
->g2
= true;
857 c
->u
.s64
.a
= tcg_temp_new_i64();
858 c
->u
.s64
.b
= tcg_const_i64(0);
859 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
864 c
->u
.s32
.a
= tcg_temp_new_i32();
865 c
->u
.s32
.b
= tcg_temp_new_i32();
866 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
867 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
868 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
870 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
877 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
878 c
->u
.s64
.b
= tcg_const_i64(0);
890 case 0x8 | 0x4 | 0x2: /* cc != 3 */
892 c
->u
.s32
.b
= tcg_const_i32(3);
894 case 0x8 | 0x4 | 0x1: /* cc != 2 */
896 c
->u
.s32
.b
= tcg_const_i32(2);
898 case 0x8 | 0x2 | 0x1: /* cc != 1 */
900 c
->u
.s32
.b
= tcg_const_i32(1);
case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
905 c
->u
.s32
.a
= tcg_temp_new_i32();
906 c
->u
.s32
.b
= tcg_const_i32(0);
907 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
909 case 0x8 | 0x4: /* cc < 2 */
911 c
->u
.s32
.b
= tcg_const_i32(2);
913 case 0x8: /* cc == 0 */
915 c
->u
.s32
.b
= tcg_const_i32(0);
917 case 0x4 | 0x2 | 0x1: /* cc != 0 */
919 c
->u
.s32
.b
= tcg_const_i32(0);
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
924 c
->u
.s32
.a
= tcg_temp_new_i32();
925 c
->u
.s32
.b
= tcg_const_i32(0);
926 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
928 case 0x4: /* cc == 1 */
930 c
->u
.s32
.b
= tcg_const_i32(1);
932 case 0x2 | 0x1: /* cc > 1 */
934 c
->u
.s32
.b
= tcg_const_i32(1);
936 case 0x2: /* cc == 2 */
938 c
->u
.s32
.b
= tcg_const_i32(2);
940 case 0x1: /* cc == 3 */
942 c
->u
.s32
.b
= tcg_const_i32(3);
945 /* CC is masked by something else: (8 >> cc) & mask. */
948 c
->u
.s32
.a
= tcg_const_i32(8);
949 c
->u
.s32
.b
= tcg_const_i32(0);
950 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
951 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
962 static void free_compare(DisasCompare
*c
)
966 tcg_temp_free_i64(c
->u
.s64
.a
);
968 tcg_temp_free_i32(c
->u
.s32
.a
);
973 tcg_temp_free_i64(c
->u
.s64
.b
);
975 tcg_temp_free_i32(c
->u
.s32
.b
);
980 /* ====================================================================== */
981 /* Define the insn format enumeration. */
982 #define F0(N) FMT_##N,
983 #define F1(N, X1) F0(N)
984 #define F2(N, X1, X2) F0(N)
985 #define F3(N, X1, X2, X3) F0(N)
986 #define F4(N, X1, X2, X3, X4) F0(N)
987 #define F5(N, X1, X2, X3, X4, X5) F0(N)
990 #include "insn-format.def"
1000 /* Define a structure to hold the decoded fields. We'll store each inside
1001 an array indexed by an enum. In order to conserve memory, we'll arrange
1002 for fields that do not exist at the same time to overlap, thus the "C"
1003 for compact. For checking purposes there is an "O" for original index
1004 as well that will be applied to availability bitmaps. */
1006 enum DisasFieldIndexO
{
1029 enum DisasFieldIndexC
{
1060 struct DisasFields
{
1064 unsigned presentC
:16;
1065 unsigned int presentO
;
1069 /* This is the way fields are to be accessed out of DisasFields. */
1070 #define have_field(S, F) have_field1((S), FLD_O_##F)
1071 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* Test whether field C (original-index space) was decoded into F,
   using the presentO availability bitmap. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}
1078 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1079 enum DisasFieldIndexC c
)
1081 assert(have_field1(f
, o
));
1085 /* Describe the layout of each field in each format. */
1086 typedef struct DisasField
{
1088 unsigned int size
:8;
1089 unsigned int type
:2;
1090 unsigned int indexC
:6;
1091 enum DisasFieldIndexO indexO
:8;
1094 typedef struct DisasFormatInfo
{
1095 DisasField op
[NUM_C_FIELD
];
1098 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1099 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1100 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1102 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1105 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1106 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1107 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1108 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1109 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1110 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1111 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1113 #define F0(N) { { } },
1114 #define F1(N, X1) { { X1 } },
1115 #define F2(N, X1, X2) { { X1, X2 } },
1116 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1117 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1118 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1120 static const DisasFormatInfo format_info
[] = {
1121 #include "insn-format.def"
1139 /* Generally, we'll extract operands into this structures, operate upon
1140 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1141 of routines below for more details. */
1143 bool g_out
, g_out2
, g_in1
, g_in2
;
1144 TCGv_i64 out
, out2
, in1
, in2
;
1148 /* Instructions can place constraints on their operands, raising specification
1149 exceptions if they are violated. To make this easy to automate, each "in1",
1150 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1151 of the following, or 0. To make this easy to document, we'll put the
1152 SPEC_<name> defines next to <name>. */
1154 #define SPEC_r1_even 1
1155 #define SPEC_r2_even 2
1156 #define SPEC_r3_even 4
1157 #define SPEC_r1_f128 8
1158 #define SPEC_r2_f128 16
1160 /* Return values from translate_one, indicating the state of the TB. */
1162 /* Continue the TB. */
1164 /* We have emitted one or more goto_tb. No fixup required. */
1166 /* We are not using a goto_tb (for whatever reason), but have updated
1167 the PC (for whatever reason), so there's no need to do it again on
1170 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1171 updated the PC for the next instruction to be executed. */
1173 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1174 No following code will be executed. */
1178 typedef enum DisasFacility
{
1179 FAC_Z
, /* zarch (default) */
1180 FAC_CASS
, /* compare and swap and store */
1181 FAC_CASS2
, /* compare and swap and store 2*/
1182 FAC_DFP
, /* decimal floating point */
1183 FAC_DFPR
, /* decimal floating point rounding */
1184 FAC_DO
, /* distinct operands */
1185 FAC_EE
, /* execute extensions */
1186 FAC_EI
, /* extended immediate */
1187 FAC_FPE
, /* floating point extension */
1188 FAC_FPSSH
, /* floating point support sign handling */
1189 FAC_FPRGR
, /* FPR-GR transfer */
1190 FAC_GIE
, /* general instructions extension */
1191 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1192 FAC_HW
, /* high-word */
1193 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1194 FAC_MIE
, /* miscellaneous-instruction-extensions */
1195 FAC_LAT
, /* load-and-trap */
1196 FAC_LOC
, /* load/store on condition */
1197 FAC_LD
, /* long displacement */
1198 FAC_PC
, /* population count */
1199 FAC_SCF
, /* store clock fast */
1200 FAC_SFLE
, /* store facility list extended */
1201 FAC_ILA
, /* interlocked access facility 1 */
1202 FAC_LPP
, /* load-program-parameter */
1208 DisasFacility fac
:8;
1213 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1214 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1215 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1216 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1217 void (*help_cout
)(DisasContext
*, DisasOps
*);
1218 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1223 /* ====================================================================== */
1224 /* Miscellaneous helpers, used by several operations. */
1226 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1227 DisasOps
*o
, int mask
)
1229 int b2
= get_field(f
, b2
);
1230 int d2
= get_field(f
, d2
);
1233 o
->in2
= tcg_const_i64(d2
& mask
);
1235 o
->in2
= get_address(s
, 0, b2
, d2
);
1236 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1240 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1242 if (dest
== s
->next_pc
) {
1243 per_branch(s
, true);
1246 if (use_goto_tb(s
, dest
)) {
1248 per_breaking_event(s
);
1250 tcg_gen_movi_i64(psw_addr
, dest
);
1251 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1252 return EXIT_GOTO_TB
;
1254 tcg_gen_movi_i64(psw_addr
, dest
);
1255 per_branch(s
, false);
1256 return EXIT_PC_UPDATED
;
1260 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1261 bool is_imm
, int imm
, TCGv_i64 cdest
)
1264 uint64_t dest
= s
->pc
+ 2 * imm
;
1267 /* Take care of the special cases first. */
1268 if (c
->cond
== TCG_COND_NEVER
) {
1273 if (dest
== s
->next_pc
) {
1274 /* Branch to next. */
1275 per_branch(s
, true);
1279 if (c
->cond
== TCG_COND_ALWAYS
) {
1280 ret
= help_goto_direct(s
, dest
);
1284 if (TCGV_IS_UNUSED_I64(cdest
)) {
1285 /* E.g. bcr %r0 -> no branch. */
1289 if (c
->cond
== TCG_COND_ALWAYS
) {
1290 tcg_gen_mov_i64(psw_addr
, cdest
);
1291 per_branch(s
, false);
1292 ret
= EXIT_PC_UPDATED
;
1297 if (use_goto_tb(s
, s
->next_pc
)) {
1298 if (is_imm
&& use_goto_tb(s
, dest
)) {
1299 /* Both exits can use goto_tb. */
1302 lab
= gen_new_label();
1304 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1306 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1309 /* Branch not taken. */
1311 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1312 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1316 per_breaking_event(s
);
1318 tcg_gen_movi_i64(psw_addr
, dest
);
1319 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1323 /* Fallthru can use goto_tb, but taken branch cannot. */
1324 /* Store taken branch destination before the brcond. This
1325 avoids having to allocate a new local temp to hold it.
1326 We'll overwrite this in the not taken case anyway. */
1328 tcg_gen_mov_i64(psw_addr
, cdest
);
1331 lab
= gen_new_label();
1333 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1335 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1338 /* Branch not taken. */
1341 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1342 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1346 tcg_gen_movi_i64(psw_addr
, dest
);
1348 per_breaking_event(s
);
1349 ret
= EXIT_PC_UPDATED
;
1352 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1353 Most commonly we're single-stepping or some other condition that
1354 disables all use of goto_tb. Just update the PC and exit. */
1356 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1358 cdest
= tcg_const_i64(dest
);
1362 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1364 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1366 TCGv_i32 t0
= tcg_temp_new_i32();
1367 TCGv_i64 t1
= tcg_temp_new_i64();
1368 TCGv_i64 z
= tcg_const_i64(0);
1369 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1370 tcg_gen_extu_i32_i64(t1
, t0
);
1371 tcg_temp_free_i32(t0
);
1372 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1373 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1374 tcg_temp_free_i64(t1
);
1375 tcg_temp_free_i64(z
);
1379 tcg_temp_free_i64(cdest
);
1381 tcg_temp_free_i64(next
);
1383 ret
= EXIT_PC_UPDATED
;
1391 /* ====================================================================== */
1392 /* The operations. These perform the bulk of the work for any insn,
1393 usually after the operands have been loaded and output initialized. */
1395 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1398 z
= tcg_const_i64(0);
1399 n
= tcg_temp_new_i64();
1400 tcg_gen_neg_i64(n
, o
->in2
);
1401 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1402 tcg_temp_free_i64(n
);
1403 tcg_temp_free_i64(z
);
1407 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1409 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1413 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1415 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1419 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1421 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1422 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1426 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1428 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1432 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1437 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1439 /* The carry flag is the msb of CC, therefore the branch mask that would
1440 create that comparison is 3. Feeding the generated comparison to
1441 setcond produces the carry flag that we desire. */
1442 disas_jcc(s
, &cmp
, 3);
1443 carry
= tcg_temp_new_i64();
1445 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1447 TCGv_i32 t
= tcg_temp_new_i32();
1448 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1449 tcg_gen_extu_i32_i64(carry
, t
);
1450 tcg_temp_free_i32(t
);
1454 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1455 tcg_temp_free_i64(carry
);
1459 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1461 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1465 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1467 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1471 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1473 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1474 return_low128(o
->out2
);
1478 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1480 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1484 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1486 int shift
= s
->insn
->data
& 0xff;
1487 int size
= s
->insn
->data
>> 8;
1488 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1491 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1492 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1493 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1495 /* Produce the CC from only the bits manipulated. */
1496 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1497 set_cc_nz_u64(s
, cc_dst
);
1501 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1503 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1504 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1505 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1506 per_branch(s
, false);
1507 return EXIT_PC_UPDATED
;
/* Branch-and-save with immediate target: store the link information for
   the next sequential instruction into the output register, then branch
   to PC + 2 * I2 (I2 is a signed halfword-scaled displacement). */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1519 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1521 int m1
= get_field(s
->fields
, m1
);
1522 bool is_imm
= have_field(s
->fields
, i2
);
1523 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1526 /* BCR with R2 = 0 causes no branching */
1527 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1529 /* Perform serialization */
1530 /* FIXME: check for fast-BCR-serialization facility */
1531 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1534 /* Perform serialization */
1535 /* FIXME: perform checkpoint-synchronisation */
1536 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1541 disas_jcc(s
, &c
, m1
);
1542 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1545 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1547 int r1
= get_field(s
->fields
, r1
);
1548 bool is_imm
= have_field(s
->fields
, i2
);
1549 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1553 c
.cond
= TCG_COND_NE
;
1558 t
= tcg_temp_new_i64();
1559 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1560 store_reg32_i64(r1
, t
);
1561 c
.u
.s32
.a
= tcg_temp_new_i32();
1562 c
.u
.s32
.b
= tcg_const_i32(0);
1563 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1564 tcg_temp_free_i64(t
);
1566 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1569 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1571 int r1
= get_field(s
->fields
, r1
);
1572 int imm
= get_field(s
->fields
, i2
);
1576 c
.cond
= TCG_COND_NE
;
1581 t
= tcg_temp_new_i64();
1582 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1583 tcg_gen_subi_i64(t
, t
, 1);
1584 store_reg32h_i64(r1
, t
);
1585 c
.u
.s32
.a
= tcg_temp_new_i32();
1586 c
.u
.s32
.b
= tcg_const_i32(0);
1587 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1588 tcg_temp_free_i64(t
);
1590 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1593 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1595 int r1
= get_field(s
->fields
, r1
);
1596 bool is_imm
= have_field(s
->fields
, i2
);
1597 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1600 c
.cond
= TCG_COND_NE
;
1605 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1606 c
.u
.s64
.a
= regs
[r1
];
1607 c
.u
.s64
.b
= tcg_const_i64(0);
1609 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1612 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1614 int r1
= get_field(s
->fields
, r1
);
1615 int r3
= get_field(s
->fields
, r3
);
1616 bool is_imm
= have_field(s
->fields
, i2
);
1617 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1621 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1626 t
= tcg_temp_new_i64();
1627 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1628 c
.u
.s32
.a
= tcg_temp_new_i32();
1629 c
.u
.s32
.b
= tcg_temp_new_i32();
1630 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1631 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1632 store_reg32_i64(r1
, t
);
1633 tcg_temp_free_i64(t
);
1635 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1638 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1640 int r1
= get_field(s
->fields
, r1
);
1641 int r3
= get_field(s
->fields
, r3
);
1642 bool is_imm
= have_field(s
->fields
, i2
);
1643 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1646 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1649 if (r1
== (r3
| 1)) {
1650 c
.u
.s64
.b
= load_reg(r3
| 1);
1653 c
.u
.s64
.b
= regs
[r3
| 1];
1657 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1658 c
.u
.s64
.a
= regs
[r1
];
1661 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1664 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1666 int imm
, m3
= get_field(s
->fields
, m3
);
1670 c
.cond
= ltgt_cond
[m3
];
1671 if (s
->insn
->data
) {
1672 c
.cond
= tcg_unsigned_cond(c
.cond
);
1674 c
.is_64
= c
.g1
= c
.g2
= true;
1678 is_imm
= have_field(s
->fields
, i4
);
1680 imm
= get_field(s
->fields
, i4
);
1683 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1684 get_field(s
->fields
, d4
));
1687 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1690 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1692 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1697 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1699 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1704 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1706 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1711 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1713 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1714 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1715 tcg_temp_free_i32(m3
);
1716 gen_set_cc_nz_f32(s
, o
->in2
);
1720 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1722 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1723 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1724 tcg_temp_free_i32(m3
);
1725 gen_set_cc_nz_f64(s
, o
->in2
);
1729 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1731 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1732 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1733 tcg_temp_free_i32(m3
);
1734 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1738 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1740 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1741 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1742 tcg_temp_free_i32(m3
);
1743 gen_set_cc_nz_f32(s
, o
->in2
);
1747 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1749 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1750 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1751 tcg_temp_free_i32(m3
);
1752 gen_set_cc_nz_f64(s
, o
->in2
);
1756 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1758 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1759 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1760 tcg_temp_free_i32(m3
);
1761 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1765 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1767 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1768 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1769 tcg_temp_free_i32(m3
);
1770 gen_set_cc_nz_f32(s
, o
->in2
);
1774 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1776 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1777 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1778 tcg_temp_free_i32(m3
);
1779 gen_set_cc_nz_f64(s
, o
->in2
);
1783 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1785 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1786 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1787 tcg_temp_free_i32(m3
);
1788 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1792 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1794 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1795 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1796 tcg_temp_free_i32(m3
);
1797 gen_set_cc_nz_f32(s
, o
->in2
);
1801 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1803 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1804 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1805 tcg_temp_free_i32(m3
);
1806 gen_set_cc_nz_f64(s
, o
->in2
);
1810 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1812 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1813 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1814 tcg_temp_free_i32(m3
);
1815 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1819 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1821 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1822 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1823 tcg_temp_free_i32(m3
);
1827 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1829 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1830 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1831 tcg_temp_free_i32(m3
);
1835 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1837 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1838 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1839 tcg_temp_free_i32(m3
);
1840 return_low128(o
->out2
);
1844 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1846 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1847 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1848 tcg_temp_free_i32(m3
);
1852 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1854 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1855 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1856 tcg_temp_free_i32(m3
);
1860 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1862 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1863 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1864 tcg_temp_free_i32(m3
);
1865 return_low128(o
->out2
);
1869 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1871 int r2
= get_field(s
->fields
, r2
);
1872 TCGv_i64 len
= tcg_temp_new_i64();
1874 potential_page_fault(s
);
1875 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1877 return_low128(o
->out
);
1879 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1880 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1881 tcg_temp_free_i64(len
);
1886 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1888 int l
= get_field(s
->fields
, l1
);
1893 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1894 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1897 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1898 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1901 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1902 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1905 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1906 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1909 vl
= tcg_const_i32(l
);
1910 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1911 tcg_temp_free_i32(vl
);
1915 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1919 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1921 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1922 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1923 potential_page_fault(s
);
1924 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1925 tcg_temp_free_i32(r1
);
1926 tcg_temp_free_i32(r3
);
1931 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1933 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1934 TCGv_i32 t1
= tcg_temp_new_i32();
1935 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1936 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1938 tcg_temp_free_i32(t1
);
1939 tcg_temp_free_i32(m3
);
1943 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1945 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1947 return_low128(o
->in2
);
1951 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1953 TCGv_i64 t
= tcg_temp_new_i64();
1954 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1955 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1956 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1957 tcg_temp_free_i64(t
);
1961 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1963 int d2
= get_field(s
->fields
, d2
);
1964 int b2
= get_field(s
->fields
, b2
);
1967 /* Note that in1 = R3 (new value) and
1968 in2 = (zero-extended) R1 (expected value). */
1970 addr
= get_address(s
, 0, b2
, d2
);
1971 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1972 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1973 tcg_temp_free_i64(addr
);
1975 /* Are the memory and expected values (un)equal? Note that this setcond
1976 produces the output CC value, thus the NE sense of the test. */
1977 cc
= tcg_temp_new_i64();
1978 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1979 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1980 tcg_temp_free_i64(cc
);
1986 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1988 int r1
= get_field(s
->fields
, r1
);
1989 int r3
= get_field(s
->fields
, r3
);
1990 int d2
= get_field(s
->fields
, d2
);
1991 int b2
= get_field(s
->fields
, b2
);
1993 TCGv_i32 t_r1
, t_r3
;
1995 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1996 addr
= get_address(s
, 0, b2
, d2
);
1997 t_r1
= tcg_const_i32(r1
);
1998 t_r3
= tcg_const_i32(r3
);
1999 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2000 tcg_temp_free_i64(addr
);
2001 tcg_temp_free_i32(t_r1
);
2002 tcg_temp_free_i32(t_r3
);
2008 #ifndef CONFIG_USER_ONLY
2009 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2011 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2012 check_privileged(s
);
2013 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
2014 tcg_temp_free_i32(r1
);
2020 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2022 TCGv_i64 t1
= tcg_temp_new_i64();
2023 TCGv_i32 t2
= tcg_temp_new_i32();
2024 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2025 gen_helper_cvd(t1
, t2
);
2026 tcg_temp_free_i32(t2
);
2027 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2028 tcg_temp_free_i64(t1
);
2032 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2034 int m3
= get_field(s
->fields
, m3
);
2035 TCGLabel
*lab
= gen_new_label();
2038 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2039 if (s
->insn
->data
) {
2040 c
= tcg_unsigned_cond(c
);
2042 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2051 #ifndef CONFIG_USER_ONLY
2052 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2054 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2055 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2056 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2058 check_privileged(s
);
2062 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2064 tcg_temp_free_i32(func_code
);
2065 tcg_temp_free_i32(r3
);
2066 tcg_temp_free_i32(r1
);
2071 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2073 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2074 return_low128(o
->out
);
2078 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2080 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2081 return_low128(o
->out
);
2085 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2087 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2088 return_low128(o
->out
);
2092 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2094 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2095 return_low128(o
->out
);
2099 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2101 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2105 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2107 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2111 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2113 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2114 return_low128(o
->out2
);
2118 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2120 int r2
= get_field(s
->fields
, r2
);
2121 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2125 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2127 /* No cache information provided. */
2128 tcg_gen_movi_i64(o
->out
, -1);
2132 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2134 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2138 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2140 int r1
= get_field(s
->fields
, r1
);
2141 int r2
= get_field(s
->fields
, r2
);
2142 TCGv_i64 t
= tcg_temp_new_i64();
2144 /* Note the "subsequently" in the PoO, which implies a defined result
2145 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2146 tcg_gen_shri_i64(t
, psw_mask
, 32);
2147 store_reg32_i64(r1
, t
);
2149 store_reg32_i64(r2
, psw_mask
);
2152 tcg_temp_free_i64(t
);
2156 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2158 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2159 tb->flags, (ab)use the tb->cs_base field as the address of
2160 the template in memory, and grab 8 bits of tb->flags/cflags for
2161 the contents of the register. We would then recognize all this
2162 in gen_intermediate_code_internal, generating code for exactly
2163 one instruction. This new TB then gets executed normally.
2165 On the other hand, this seems to be mostly used for modifying
2166 MVC inside of memcpy, which needs a helper call anyway. So
2167 perhaps this doesn't bear thinking about any further. */
2174 tmp
= tcg_const_i64(s
->next_pc
);
2175 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2176 tcg_temp_free_i64(tmp
);
2181 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2183 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2184 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2185 tcg_temp_free_i32(m3
);
2189 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2191 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2192 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2193 tcg_temp_free_i32(m3
);
2197 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2199 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2200 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2201 return_low128(o
->out2
);
2202 tcg_temp_free_i32(m3
);
2206 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2208 /* We'll use the original input for cc computation, since we get to
2209 compare that against 0, which ought to be better than comparing
2210 the real output against 64. It also lets cc_dst be a convenient
2211 temporary during our computation. */
2212 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2214 /* R1 = IN ? CLZ(IN) : 64. */
2215 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2217 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2218 value by 64, which is undefined. But since the shift is 64 iff the
2219 input is zero, we still get the correct result after and'ing. */
2220 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2221 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2222 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2226 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2228 int m3
= get_field(s
->fields
, m3
);
2229 int pos
, len
, base
= s
->insn
->data
;
2230 TCGv_i64 tmp
= tcg_temp_new_i64();
2235 /* Effectively a 32-bit load. */
2236 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2243 /* Effectively a 16-bit load. */
2244 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2252 /* Effectively an 8-bit load. */
2253 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2258 pos
= base
+ ctz32(m3
) * 8;
2259 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2260 ccm
= ((1ull << len
) - 1) << pos
;
2264 /* This is going to be a sequence of loads and inserts. */
2265 pos
= base
+ 32 - 8;
2269 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2270 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2271 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2274 m3
= (m3
<< 1) & 0xf;
2280 tcg_gen_movi_i64(tmp
, ccm
);
2281 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2282 tcg_temp_free_i64(tmp
);
2286 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2288 int shift
= s
->insn
->data
& 0xff;
2289 int size
= s
->insn
->data
>> 8;
2290 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2294 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2299 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2301 t1
= tcg_temp_new_i64();
2302 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2303 tcg_gen_shri_i64(t1
, t1
, 36);
2304 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2306 tcg_gen_extu_i32_i64(t1
, cc_op
);
2307 tcg_gen_shli_i64(t1
, t1
, 28);
2308 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2309 tcg_temp_free_i64(t1
);
2313 #ifndef CONFIG_USER_ONLY
2314 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2316 check_privileged(s
);
2317 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2321 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2323 check_privileged(s
);
2324 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2329 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2331 /* The real output is indeed the original value in memory;
2332 recompute the addition for the computation of CC. */
2333 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2334 s
->insn
->data
| MO_ALIGN
);
2335 /* However, we need to recompute the addition for setting CC. */
2336 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2340 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2342 /* The real output is indeed the original value in memory;
2343 recompute the addition for the computation of CC. */
2344 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2345 s
->insn
->data
| MO_ALIGN
);
2346 /* However, we need to recompute the operation for setting CC. */
2347 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2351 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2353 /* The real output is indeed the original value in memory;
2354 recompute the addition for the computation of CC. */
2355 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2356 s
->insn
->data
| MO_ALIGN
);
2357 /* However, we need to recompute the operation for setting CC. */
2358 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2362 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2364 /* The real output is indeed the original value in memory;
2365 recompute the addition for the computation of CC. */
2366 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2367 s
->insn
->data
| MO_ALIGN
);
2368 /* However, we need to recompute the operation for setting CC. */
2369 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2373 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2375 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2379 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2381 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2385 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2387 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2391 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2393 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2397 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2399 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2400 return_low128(o
->out2
);
2404 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2406 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2407 return_low128(o
->out2
);
2411 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2413 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2417 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2419 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2423 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2425 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2429 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2431 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2435 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2437 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2441 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2443 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2447 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2449 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2453 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2455 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2459 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2461 TCGLabel
*lab
= gen_new_label();
2462 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2463 /* The value is stored even in case of trap. */
2464 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2470 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2472 TCGLabel
*lab
= gen_new_label();
2473 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2474 /* The value is stored even in case of trap. */
2475 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2481 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2483 TCGLabel
*lab
= gen_new_label();
2484 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2485 /* The value is stored even in case of trap. */
2486 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2492 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2494 TCGLabel
*lab
= gen_new_label();
2495 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2496 /* The value is stored even in case of trap. */
2497 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2503 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2505 TCGLabel
*lab
= gen_new_label();
2506 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2507 /* The value is stored even in case of trap. */
2508 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2514 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2518 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2521 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2525 TCGv_i32 t32
= tcg_temp_new_i32();
2528 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2531 t
= tcg_temp_new_i64();
2532 tcg_gen_extu_i32_i64(t
, t32
);
2533 tcg_temp_free_i32(t32
);
2535 z
= tcg_const_i64(0);
2536 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2537 tcg_temp_free_i64(t
);
2538 tcg_temp_free_i64(z
);
2544 #ifndef CONFIG_USER_ONLY
2545 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2547 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2548 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2549 check_privileged(s
);
2550 potential_page_fault(s
);
2551 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2552 tcg_temp_free_i32(r1
);
2553 tcg_temp_free_i32(r3
);
2557 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2559 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2560 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2561 check_privileged(s
);
2562 potential_page_fault(s
);
2563 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2564 tcg_temp_free_i32(r1
);
2565 tcg_temp_free_i32(r3
);
2569 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2571 check_privileged(s
);
2572 potential_page_fault(s
);
2573 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2578 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2580 check_privileged(s
);
2582 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2586 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2590 check_privileged(s
);
2591 per_breaking_event(s
);
2593 t1
= tcg_temp_new_i64();
2594 t2
= tcg_temp_new_i64();
2595 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2596 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2597 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2598 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2599 tcg_gen_shli_i64(t1
, t1
, 32);
2600 gen_helper_load_psw(cpu_env
, t1
, t2
);
2601 tcg_temp_free_i64(t1
);
2602 tcg_temp_free_i64(t2
);
2603 return EXIT_NORETURN
;
2606 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2610 check_privileged(s
);
2611 per_breaking_event(s
);
2613 t1
= tcg_temp_new_i64();
2614 t2
= tcg_temp_new_i64();
2615 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2616 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2617 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2618 gen_helper_load_psw(cpu_env
, t1
, t2
);
2619 tcg_temp_free_i64(t1
);
2620 tcg_temp_free_i64(t2
);
2621 return EXIT_NORETURN
;
2625 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2627 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2628 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2629 potential_page_fault(s
);
2630 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2631 tcg_temp_free_i32(r1
);
2632 tcg_temp_free_i32(r3
);
2636 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2638 int r1
= get_field(s
->fields
, r1
);
2639 int r3
= get_field(s
->fields
, r3
);
2642 /* Only one register to read. */
2643 t1
= tcg_temp_new_i64();
2644 if (unlikely(r1
== r3
)) {
2645 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2646 store_reg32_i64(r1
, t1
);
2651 /* First load the values of the first and last registers to trigger
2652 possible page faults. */
2653 t2
= tcg_temp_new_i64();
2654 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2655 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2656 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2657 store_reg32_i64(r1
, t1
);
2658 store_reg32_i64(r3
, t2
);
2660 /* Only two registers to read. */
2661 if (((r1
+ 1) & 15) == r3
) {
2667 /* Then load the remaining registers. Page fault can't occur. */
2669 tcg_gen_movi_i64(t2
, 4);
2672 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2673 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2674 store_reg32_i64(r1
, t1
);
2682 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2684 int r1
= get_field(s
->fields
, r1
);
2685 int r3
= get_field(s
->fields
, r3
);
2688 /* Only one register to read. */
2689 t1
= tcg_temp_new_i64();
2690 if (unlikely(r1
== r3
)) {
2691 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2692 store_reg32h_i64(r1
, t1
);
2697 /* First load the values of the first and last registers to trigger
2698 possible page faults. */
2699 t2
= tcg_temp_new_i64();
2700 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2701 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2702 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2703 store_reg32h_i64(r1
, t1
);
2704 store_reg32h_i64(r3
, t2
);
2706 /* Only two registers to read. */
2707 if (((r1
+ 1) & 15) == r3
) {
2713 /* Then load the remaining registers. Page fault can't occur. */
2715 tcg_gen_movi_i64(t2
, 4);
2718 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2719 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2720 store_reg32h_i64(r1
, t1
);
2728 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2730 int r1
= get_field(s
->fields
, r1
);
2731 int r3
= get_field(s
->fields
, r3
);
2734 /* Only one register to read. */
2735 if (unlikely(r1
== r3
)) {
2736 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2740 /* First load the values of the first and last registers to trigger
2741 possible page faults. */
2742 t1
= tcg_temp_new_i64();
2743 t2
= tcg_temp_new_i64();
2744 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2745 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2746 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2747 tcg_gen_mov_i64(regs
[r1
], t1
);
2750 /* Only two registers to read. */
2751 if (((r1
+ 1) & 15) == r3
) {
2756 /* Then load the remaining registers. Page fault can't occur. */
2758 tcg_gen_movi_i64(t1
, 8);
2761 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2762 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2769 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2772 TCGMemOp mop
= s
->insn
->data
;
2774 /* In a parallel context, stop the world and single step. */
2775 if (parallel_cpus
) {
2776 potential_page_fault(s
);
2777 gen_exception(EXCP_ATOMIC
);
2778 return EXIT_NORETURN
;
2781 /* In a serial context, perform the two loads ... */
2782 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2783 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2784 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2785 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2786 tcg_temp_free_i64(a1
);
2787 tcg_temp_free_i64(a2
);
2789 /* ... and indicate that we performed them while interlocked. */
2790 gen_op_movi_cc(s
, 0);
2794 #ifndef CONFIG_USER_ONLY
2795 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2797 check_privileged(s
);
2798 potential_page_fault(s
);
2799 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2803 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2805 check_privileged(s
);
2806 potential_page_fault(s
);
2807 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2812 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2815 o
->g_out
= o
->g_in2
;
2816 TCGV_UNUSED_I64(o
->in2
);
2821 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2823 int b2
= get_field(s
->fields
, b2
);
2824 TCGv ar1
= tcg_temp_new_i64();
2827 o
->g_out
= o
->g_in2
;
2828 TCGV_UNUSED_I64(o
->in2
);
2831 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2832 case PSW_ASC_PRIMARY
>> 32:
2833 tcg_gen_movi_i64(ar1
, 0);
2835 case PSW_ASC_ACCREG
>> 32:
2836 tcg_gen_movi_i64(ar1
, 1);
2838 case PSW_ASC_SECONDARY
>> 32:
2840 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2842 tcg_gen_movi_i64(ar1
, 0);
2845 case PSW_ASC_HOME
>> 32:
2846 tcg_gen_movi_i64(ar1
, 2);
2850 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2851 tcg_temp_free_i64(ar1
);
2856 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2860 o
->g_out
= o
->g_in1
;
2861 o
->g_out2
= o
->g_in2
;
2862 TCGV_UNUSED_I64(o
->in1
);
2863 TCGV_UNUSED_I64(o
->in2
);
2864 o
->g_in1
= o
->g_in2
= false;
2868 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2870 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2871 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2872 tcg_temp_free_i32(l
);
2876 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2878 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2879 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2880 potential_page_fault(s
);
2881 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2882 tcg_temp_free_i32(r1
);
2883 tcg_temp_free_i32(r2
);
2888 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2890 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2891 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2892 potential_page_fault(s
);
2893 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2894 tcg_temp_free_i32(r1
);
2895 tcg_temp_free_i32(r3
);
2900 #ifndef CONFIG_USER_ONLY
2901 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2903 int r1
= get_field(s
->fields
, l1
);
2904 check_privileged(s
);
2905 potential_page_fault(s
);
2906 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2911 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2913 int r1
= get_field(s
->fields
, l1
);
2914 check_privileged(s
);
2915 potential_page_fault(s
);
2916 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2922 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2924 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2929 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2931 potential_page_fault(s
);
2932 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2934 return_low128(o
->in2
);
2938 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2940 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2944 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2946 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2950 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2952 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2956 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2958 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2962 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2964 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2968 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2970 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2971 return_low128(o
->out2
);
2975 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2977 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2978 return_low128(o
->out2
);
2982 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2984 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2985 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2986 tcg_temp_free_i64(r3
);
2990 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2992 int r3
= get_field(s
->fields
, r3
);
2993 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2997 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2999 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3000 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3001 tcg_temp_free_i64(r3
);
3005 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3007 int r3
= get_field(s
->fields
, r3
);
3008 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3012 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3015 z
= tcg_const_i64(0);
3016 n
= tcg_temp_new_i64();
3017 tcg_gen_neg_i64(n
, o
->in2
);
3018 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3019 tcg_temp_free_i64(n
);
3020 tcg_temp_free_i64(z
);
3024 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3026 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3030 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3032 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3036 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3038 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3039 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3043 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3045 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3046 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3047 tcg_temp_free_i32(l
);
3052 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3054 tcg_gen_neg_i64(o
->out
, o
->in2
);
3058 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3060 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3064 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3066 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3070 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3072 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3073 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3077 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3079 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3080 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3081 tcg_temp_free_i32(l
);
3086 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3088 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3092 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3094 int shift
= s
->insn
->data
& 0xff;
3095 int size
= s
->insn
->data
>> 8;
3096 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3099 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3100 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3102 /* Produce the CC from only the bits manipulated. */
3103 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3104 set_cc_nz_u64(s
, cc_dst
);
3108 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3110 gen_helper_popcnt(o
->out
, o
->in2
);
3114 #ifndef CONFIG_USER_ONLY
3115 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3117 check_privileged(s
);
3118 gen_helper_ptlb(cpu_env
);
3123 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3125 int i3
= get_field(s
->fields
, i3
);
3126 int i4
= get_field(s
->fields
, i4
);
3127 int i5
= get_field(s
->fields
, i5
);
3128 int do_zero
= i4
& 0x80;
3129 uint64_t mask
, imask
, pmask
;
3132 /* Adjust the arguments for the specific insn. */
3133 switch (s
->fields
->op2
) {
3134 case 0x55: /* risbg */
3139 case 0x5d: /* risbhg */
3142 pmask
= 0xffffffff00000000ull
;
3144 case 0x51: /* risblg */
3147 pmask
= 0x00000000ffffffffull
;
3153 /* MASK is the set of bits to be inserted from R2.
3154 Take care for I3/I4 wraparound. */
3157 mask
^= pmask
>> i4
>> 1;
3159 mask
|= ~(pmask
>> i4
>> 1);
3163 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3164 insns, we need to keep the other half of the register. */
3165 imask
= ~mask
| ~pmask
;
3167 if (s
->fields
->op2
== 0x55) {
3177 if (s
->fields
->op2
== 0x5d) {
3181 /* In some cases we can implement this with extract. */
3182 if (imask
== 0 && pos
== 0 && len
> 0 && rot
+ len
<= 64) {
3183 tcg_gen_extract_i64(o
->out
, o
->in2
, rot
, len
);
3187 /* In some cases we can implement this with deposit. */
3188 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3189 /* Note that we rotate the bits to be inserted to the lsb, not to
3190 the position as described in the PoO. */
3191 rot
= (rot
- pos
) & 63;
3196 /* Rotate the input as necessary. */
3197 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3199 /* Insert the selected bits into the output. */
3202 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3204 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3206 } else if (imask
== 0) {
3207 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3209 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3210 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3211 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3216 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3218 int i3
= get_field(s
->fields
, i3
);
3219 int i4
= get_field(s
->fields
, i4
);
3220 int i5
= get_field(s
->fields
, i5
);
3223 /* If this is a test-only form, arrange to discard the result. */
3225 o
->out
= tcg_temp_new_i64();
3233 /* MASK is the set of bits to be operated on from R2.
3234 Take care for I3/I4 wraparound. */
3237 mask
^= ~0ull >> i4
>> 1;
3239 mask
|= ~(~0ull >> i4
>> 1);
3242 /* Rotate the input as necessary. */
3243 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3246 switch (s
->fields
->op2
) {
3247 case 0x55: /* AND */
3248 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3249 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3252 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3253 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3255 case 0x57: /* XOR */
3256 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3257 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3264 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3265 set_cc_nz_u64(s
, cc_dst
);
3269 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3271 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3275 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3277 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3281 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3283 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3287 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3289 TCGv_i32 t1
= tcg_temp_new_i32();
3290 TCGv_i32 t2
= tcg_temp_new_i32();
3291 TCGv_i32 to
= tcg_temp_new_i32();
3292 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3293 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3294 tcg_gen_rotl_i32(to
, t1
, t2
);
3295 tcg_gen_extu_i32_i64(o
->out
, to
);
3296 tcg_temp_free_i32(t1
);
3297 tcg_temp_free_i32(t2
);
3298 tcg_temp_free_i32(to
);
3302 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3304 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3308 #ifndef CONFIG_USER_ONLY
3309 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3311 check_privileged(s
);
3312 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3317 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3319 check_privileged(s
);
3320 gen_helper_sacf(cpu_env
, o
->in2
);
3321 /* Addressing mode has changed, so end the block. */
3322 return EXIT_PC_STALE
;
3326 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3328 int sam
= s
->insn
->data
;
3344 /* Bizarre but true, we check the address of the current insn for the
3345 specification exception, not the next to be executed. Thus the PoO
3346 documents that Bad Things Happen two bytes before the end. */
3347 if (s
->pc
& ~mask
) {
3348 gen_program_exception(s
, PGM_SPECIFICATION
);
3349 return EXIT_NORETURN
;
3353 tsam
= tcg_const_i64(sam
);
3354 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3355 tcg_temp_free_i64(tsam
);
3357 /* Always exit the TB, since we (may have) changed execution mode. */
3358 return EXIT_PC_STALE
;
3361 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3363 int r1
= get_field(s
->fields
, r1
);
3364 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3368 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3370 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3374 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3376 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3380 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3382 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3383 return_low128(o
->out2
);
3387 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3389 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3393 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3395 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3399 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3401 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3402 return_low128(o
->out2
);
3406 #ifndef CONFIG_USER_ONLY
3407 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3409 check_privileged(s
);
3410 potential_page_fault(s
);
3411 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3416 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3418 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3419 check_privileged(s
);
3420 potential_page_fault(s
);
3421 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3423 tcg_temp_free_i32(r1
);
3428 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3435 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3437 /* We want to store when the condition is fulfilled, so branch
3438 out when it's not */
3439 c
.cond
= tcg_invert_cond(c
.cond
);
3441 lab
= gen_new_label();
3443 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3445 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3449 r1
= get_field(s
->fields
, r1
);
3450 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3451 if (s
->insn
->data
) {
3452 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3454 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3456 tcg_temp_free_i64(a
);
3462 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3464 uint64_t sign
= 1ull << s
->insn
->data
;
3465 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3466 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3467 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3468 /* The arithmetic left shift is curious in that it does not affect
3469 the sign bit. Copy that over from the source unchanged. */
3470 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3471 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3472 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3476 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3478 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3482 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3484 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3488 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3490 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3494 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3496 gen_helper_sfpc(cpu_env
, o
->in2
);
3500 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3502 gen_helper_sfas(cpu_env
, o
->in2
);
3506 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3508 int b2
= get_field(s
->fields
, b2
);
3509 int d2
= get_field(s
->fields
, d2
);
3510 TCGv_i64 t1
= tcg_temp_new_i64();
3511 TCGv_i64 t2
= tcg_temp_new_i64();
3514 switch (s
->fields
->op2
) {
3515 case 0x99: /* SRNM */
3518 case 0xb8: /* SRNMB */
3521 case 0xb9: /* SRNMT */
3527 mask
= (1 << len
) - 1;
3529 /* Insert the value into the appropriate field of the FPC. */
3531 tcg_gen_movi_i64(t1
, d2
& mask
);
3533 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3534 tcg_gen_andi_i64(t1
, t1
, mask
);
3536 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3537 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3538 tcg_temp_free_i64(t1
);
3540 /* Then install the new FPC to set the rounding mode in fpu_status. */
3541 gen_helper_sfpc(cpu_env
, t2
);
3542 tcg_temp_free_i64(t2
);
3546 #ifndef CONFIG_USER_ONLY
3547 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3549 check_privileged(s
);
3550 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3551 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3555 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3557 check_privileged(s
);
3558 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3562 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3564 check_privileged(s
);
3565 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3569 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3571 check_privileged(s
);
3572 /* ??? Surely cpu address != cpu number. In any case the previous
3573 version of this stored more than the required half-word, so it
3574 is unlikely this has ever been tested. */
3575 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3579 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3581 gen_helper_stck(o
->out
, cpu_env
);
3582 /* ??? We don't implement clock states. */
3583 gen_op_movi_cc(s
, 0);
3587 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3589 TCGv_i64 c1
= tcg_temp_new_i64();
3590 TCGv_i64 c2
= tcg_temp_new_i64();
3591 gen_helper_stck(c1
, cpu_env
);
3592 /* Shift the 64-bit value into its place as a zero-extended
3593 104-bit value. Note that "bit positions 64-103 are always
3594 non-zero so that they compare differently to STCK"; we set
3595 the least significant bit to 1. */
3596 tcg_gen_shli_i64(c2
, c1
, 56);
3597 tcg_gen_shri_i64(c1
, c1
, 8);
3598 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3599 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3600 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3601 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3602 tcg_temp_free_i64(c1
);
3603 tcg_temp_free_i64(c2
);
3604 /* ??? We don't implement clock states. */
3605 gen_op_movi_cc(s
, 0);
3609 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3611 check_privileged(s
);
3612 gen_helper_sckc(cpu_env
, o
->in2
);
3616 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3618 check_privileged(s
);
3619 gen_helper_stckc(o
->out
, cpu_env
);
3623 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3625 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3626 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3627 check_privileged(s
);
3628 potential_page_fault(s
);
3629 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3630 tcg_temp_free_i32(r1
);
3631 tcg_temp_free_i32(r3
);
3635 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3637 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3638 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3639 check_privileged(s
);
3640 potential_page_fault(s
);
3641 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3642 tcg_temp_free_i32(r1
);
3643 tcg_temp_free_i32(r3
);
3647 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3649 TCGv_i64 t1
= tcg_temp_new_i64();
3651 check_privileged(s
);
3652 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3653 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3654 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3655 tcg_temp_free_i64(t1
);
3660 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3662 check_privileged(s
);
3663 gen_helper_spt(cpu_env
, o
->in2
);
3667 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3669 check_privileged(s
);
3670 gen_helper_stfl(cpu_env
);
3674 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3676 check_privileged(s
);
3677 gen_helper_stpt(o
->out
, cpu_env
);
3681 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3683 check_privileged(s
);
3684 potential_page_fault(s
);
3685 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3690 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3692 check_privileged(s
);
3693 gen_helper_spx(cpu_env
, o
->in2
);
3697 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3699 check_privileged(s
);
3700 potential_page_fault(s
);
3701 gen_helper_xsch(cpu_env
, regs
[1]);
3706 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3708 check_privileged(s
);
3709 potential_page_fault(s
);
3710 gen_helper_csch(cpu_env
, regs
[1]);
3715 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3717 check_privileged(s
);
3718 potential_page_fault(s
);
3719 gen_helper_hsch(cpu_env
, regs
[1]);
3724 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3726 check_privileged(s
);
3727 potential_page_fault(s
);
3728 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3733 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3735 check_privileged(s
);
3736 potential_page_fault(s
);
3737 gen_helper_rchp(cpu_env
, regs
[1]);
3742 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3744 check_privileged(s
);
3745 potential_page_fault(s
);
3746 gen_helper_rsch(cpu_env
, regs
[1]);
3751 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3753 check_privileged(s
);
3754 potential_page_fault(s
);
3755 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
3760 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
3762 check_privileged(s
);
3763 potential_page_fault(s
);
3764 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
3769 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
3771 check_privileged(s
);
3772 potential_page_fault(s
);
3773 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
3778 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
3780 check_privileged(s
);
3781 potential_page_fault(s
);
3782 gen_helper_chsc(cpu_env
, o
->in2
);
3787 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3789 check_privileged(s
);
3790 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3791 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3795 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3797 uint64_t i2
= get_field(s
->fields
, i2
);
3800 check_privileged(s
);
3802 /* It is important to do what the instruction name says: STORE THEN.
3803 If we let the output hook perform the store then if we fault and
3804 restart, we'll have the wrong SYSTEM MASK in place. */
3805 t
= tcg_temp_new_i64();
3806 tcg_gen_shri_i64(t
, psw_mask
, 56);
3807 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3808 tcg_temp_free_i64(t
);
3810 if (s
->fields
->op
== 0xac) {
3811 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3812 (i2
<< 56) | 0x00ffffffffffffffull
);
3814 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3819 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3821 check_privileged(s
);
3822 potential_page_fault(s
);
3823 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3827 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3829 check_privileged(s
);
3830 potential_page_fault(s
);
3831 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3836 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
3838 potential_page_fault(s
);
3839 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
3844 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3846 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3850 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3852 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3856 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3858 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3862 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3864 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3868 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3870 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3871 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3872 potential_page_fault(s
);
3873 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3874 tcg_temp_free_i32(r1
);
3875 tcg_temp_free_i32(r3
);
3879 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3881 int m3
= get_field(s
->fields
, m3
);
3882 int pos
, base
= s
->insn
->data
;
3883 TCGv_i64 tmp
= tcg_temp_new_i64();
3885 pos
= base
+ ctz32(m3
) * 8;
3888 /* Effectively a 32-bit store. */
3889 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3890 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3896 /* Effectively a 16-bit store. */
3897 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3898 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3905 /* Effectively an 8-bit store. */
3906 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3907 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3911 /* This is going to be a sequence of shifts and stores. */
3912 pos
= base
+ 32 - 8;
3915 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3916 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3917 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3919 m3
= (m3
<< 1) & 0xf;
3924 tcg_temp_free_i64(tmp
);
3928 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3930 int r1
= get_field(s
->fields
, r1
);
3931 int r3
= get_field(s
->fields
, r3
);
3932 int size
= s
->insn
->data
;
3933 TCGv_i64 tsize
= tcg_const_i64(size
);
3937 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3939 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3944 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3948 tcg_temp_free_i64(tsize
);
3952 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3954 int r1
= get_field(s
->fields
, r1
);
3955 int r3
= get_field(s
->fields
, r3
);
3956 TCGv_i64 t
= tcg_temp_new_i64();
3957 TCGv_i64 t4
= tcg_const_i64(4);
3958 TCGv_i64 t32
= tcg_const_i64(32);
3961 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3962 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3966 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3970 tcg_temp_free_i64(t
);
3971 tcg_temp_free_i64(t4
);
3972 tcg_temp_free_i64(t32
);
3976 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3978 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3980 return_low128(o
->in2
);
3984 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3986 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3990 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3995 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3997 /* The !borrow flag is the msb of CC. Since we want the inverse of
3998 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3999 disas_jcc(s
, &cmp
, 8 | 4);
4000 borrow
= tcg_temp_new_i64();
4002 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4004 TCGv_i32 t
= tcg_temp_new_i32();
4005 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4006 tcg_gen_extu_i32_i64(borrow
, t
);
4007 tcg_temp_free_i32(t
);
4011 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4012 tcg_temp_free_i64(borrow
);
4016 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4023 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4024 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4025 tcg_temp_free_i32(t
);
4027 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
4028 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4029 tcg_temp_free_i32(t
);
4031 gen_exception(EXCP_SVC
);
4032 return EXIT_NORETURN
;
4035 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4037 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4042 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4044 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4049 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4051 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4056 #ifndef CONFIG_USER_ONLY
4058 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4060 check_privileged(s
);
4061 potential_page_fault(s
);
4062 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4067 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4069 potential_page_fault(s
);
4070 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4077 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4079 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4080 potential_page_fault(s
);
4081 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4082 tcg_temp_free_i32(l
);
4087 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4089 potential_page_fault(s
);
4090 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4091 return_low128(o
->out2
);
4096 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4098 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4099 potential_page_fault(s
);
4100 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4101 tcg_temp_free_i32(l
);
4106 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4108 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4109 potential_page_fault(s
);
4110 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4111 tcg_temp_free_i32(l
);
4115 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4117 int d1
= get_field(s
->fields
, d1
);
4118 int d2
= get_field(s
->fields
, d2
);
4119 int b1
= get_field(s
->fields
, b1
);
4120 int b2
= get_field(s
->fields
, b2
);
4121 int l
= get_field(s
->fields
, l1
);
4124 o
->addr1
= get_address(s
, 0, b1
, d1
);
4126 /* If the addresses are identical, this is a store/memset of zero. */
4127 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4128 o
->in2
= tcg_const_i64(0);
4132 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4135 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4139 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4142 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4146 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4149 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4153 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4155 gen_op_movi_cc(s
, 0);
4159 /* But in general we'll defer to a helper. */
4160 o
->in2
= get_address(s
, 0, b2
, d2
);
4161 t32
= tcg_const_i32(l
);
4162 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4163 tcg_temp_free_i32(t32
);
4168 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4170 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4174 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4176 int shift
= s
->insn
->data
& 0xff;
4177 int size
= s
->insn
->data
>> 8;
4178 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4181 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4182 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4184 /* Produce the CC from only the bits manipulated. */
4185 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4186 set_cc_nz_u64(s
, cc_dst
);
4190 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4192 o
->out
= tcg_const_i64(0);
4196 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4198 o
->out
= tcg_const_i64(0);
4204 /* ====================================================================== */
4205 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4206 the original inputs), update the various cc data structures in order to
4207 be able to compute the new condition code. */
/* Condition-code "output" generators.  Each one records a CC_OP_* method
   together with the operand values it needs, via gen_op_update*_cc_i64,
   so that the condition code can be computed lazily when consumed. */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    /* 128-bit FP result lives in the OUT/OUT2 pair.  */
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits of OUT are relevant; zero-extend into cc_dst
       so the generic CC_OP_NZ test sees just those bits.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global. */
/* Allocate a fresh temporary to receive the primary output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0
/* Allocate fresh temporaries for both halves of a paired output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0
4384 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4386 o
->out
= regs
[get_field(f
, r1
)];
4389 #define SPEC_prep_r1 0
4391 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4393 int r1
= get_field(f
, r1
);
4395 o
->out2
= regs
[r1
+ 1];
4396 o
->g_out
= o
->g_out2
= true;
4398 #define SPEC_prep_r1_P SPEC_r1_even
4400 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4402 o
->out
= fregs
[get_field(f
, r1
)];
4405 #define SPEC_prep_f1 0
4407 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4409 int r1
= get_field(f
, r1
);
4411 o
->out2
= fregs
[r1
+ 2];
4412 o
->g_out
= o
->g_out2
= true;
4414 #define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation. */
/* Store OUT into GPR r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Deposit the low 8 bits of OUT into GPR r1, leaving the rest intact.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Deposit the low 16 bits of OUT into GPR r1, leaving the rest intact.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store OUT into the low 32 bits of GPR r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store OUT into the high 32 bits of GPR r1.  */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store the OUT/OUT2 pair into the low halves of the even/odd GPR pair.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split the 64-bit OUT across the even/odd GPR pair: low half to r1+1,
   high half to r1.  Note this clobbers OUT via the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store the OUT/OUT2 pair into the low halves of the r3/r3+1 pair.  */
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* Store the OUT/OUT2 pair into the full r3/r3+1 GPR pair.  */
static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store OUT as a 32-bit value into FPR r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store OUT into FPR r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store the OUT/OUT2 pair into the f1/f1+2 FPR pair (128-bit value).  */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Store OUT into the low 32 bits of GPR r1, but only if r1 != r2
   (when they are equal the destination already holds the value).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* As above, but for a 32-bit FPR destination.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the low 8 bits of OUT to memory at ADDR1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

/* Store the low 16 bits of OUT to memory at ADDR1.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

/* Store the low 32 bits of OUT to memory at ADDR1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

/* Store all 64 bits of OUT to memory at ADDR1.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store the low 32 bits of OUT to memory at the address in IN2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Copy IN2 into GPR r1 (for insns whose "result" is the second input).  */
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

/* Copy the low 32 bits of IN2 into GPR r1.  */
static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn. */
/* Load GPR r1 into a fresh temporary as IN1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0
4574 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4576 o
->in1
= regs
[get_field(f
, r1
)];
4579 #define SPEC_in1_r1_o 0
/* IN1 = GPR r1, sign-extended from 32 bits.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* IN1 = GPR r1, zero-extended from 32 bits.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* IN1 = high 32 bits of GPR r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* IN1 = GPR r1+1 (odd member of an even/odd pair).  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* IN1 = GPR r1+1, sign-extended from 32 bits.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

/* IN1 = GPR r1+1, zero-extended from 32 bits.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* IN1 = 64-bit value assembled from the even/odd pair:
   high half from r1, low half from r1+1.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* IN1 = GPR r2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* IN1 = high 32 bits of GPR r2, shifted down.  */
static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0
/* IN1 = GPR r3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0
4649 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4651 o
->in1
= regs
[get_field(f
, r3
)];
4654 #define SPEC_in1_r3_o 0
/* IN1 = GPR r3, sign-extended from 32 bits.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

/* IN1 = GPR r3, zero-extended from 32 bits.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* IN1 = 64-bit value assembled from the r3/r3+1 pair:
   high half from r3, low half from r3+1.  */
static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even
/* IN1 = FPR r1 as a 32-bit value.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0
4684 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4686 o
->in1
= fregs
[get_field(f
, r1
)];
4689 #define SPEC_in1_f1_o 0
4691 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4693 int r1
= get_field(f
, r1
);
4695 o
->out2
= fregs
[r1
+ 2];
4696 o
->g_out
= o
->g_out2
= true;
4698 #define SPEC_in1_x1_o SPEC_r1_f128
4700 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4702 o
->in1
= fregs
[get_field(f
, r3
)];
4705 #define SPEC_in1_f3_o 0
/* ADDR1 = effective address from the b1/d1 fields (no index register).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* ADDR1 = effective address from the x2/b2/d2 fields; the index field
   is optional for some formats, so treat a missing x2 as zero.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0
4720 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4723 o
->in1
= tcg_temp_new_i64();
4724 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4726 #define SPEC_in1_m1_8u 0
4728 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4731 o
->in1
= tcg_temp_new_i64();
4732 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4734 #define SPEC_in1_m1_16s 0
4736 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4739 o
->in1
= tcg_temp_new_i64();
4740 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4742 #define SPEC_in1_m1_16u 0
4744 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4747 o
->in1
= tcg_temp_new_i64();
4748 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4750 #define SPEC_in1_m1_32s 0
4752 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4755 o
->in1
= tcg_temp_new_i64();
4756 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4758 #define SPEC_in1_m1_32u 0
4760 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4763 o
->in1
= tcg_temp_new_i64();
4764 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4766 #define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn. */
4771 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4773 o
->in2
= regs
[get_field(f
, r1
)];
4776 #define SPEC_in2_r1_o 0
/* IN2 = GPR r1, zero-extended from 16 bits.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* IN2 = GPR r1, zero-extended from 32 bits.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* IN2 = 64-bit value assembled from the even/odd pair:
   high half from r1, low half from r1+1.  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

/* IN2 = GPR r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0
4806 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4808 o
->in2
= regs
[get_field(f
, r2
)];
4811 #define SPEC_in2_r2_o 0
4813 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4815 int r2
= get_field(f
, r2
);
4817 o
->in2
= load_reg(r2
);
4820 #define SPEC_in2_r2_nz 0
/* IN2 = GPR r2, sign-extended from 8 bits.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

/* IN2 = GPR r2, zero-extended from 8 bits.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* IN2 = GPR r2, sign-extended from 16 bits.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* IN2 = GPR r2, zero-extended from 16 bits.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* IN2 = GPR r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* IN2 = high 32 bits of GPR r3, shifted down.  */
static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

/* IN2 = GPR r2, sign-extended from 32 bits.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

/* IN2 = GPR r2, zero-extended from 32 bits.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* IN2 = high 32 bits of GPR r2, shifted down.  */
static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* IN2 = FPR r2 as a 32-bit value.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0
4890 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4892 o
->in2
= fregs
[get_field(f
, r2
)];
4895 #define SPEC_in2_f2_o 0
4897 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4899 int r2
= get_field(f
, r2
);
4901 o
->in2
= fregs
[r2
+ 2];
4902 o
->g_in1
= o
->g_in2
= true;
4904 #define SPEC_in2_x2_o SPEC_r2_f128
/* IN2 = effective address formed from GPR r2 used as a base (no index,
   zero displacement).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* IN2 = effective address from the x2/b2/d2 fields; a missing index
   field is treated as zero.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* IN2 = PC-relative address: current PC plus the signed halfword-scaled
   immediate i2.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* IN2 = shift count for a 32-bit shift (masked to 0..31).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* IN2 = shift count for a 64-bit shift (masked to 0..63).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
4937 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4940 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4942 #define SPEC_in2_m2_8u 0
4944 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4947 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4949 #define SPEC_in2_m2_16s 0
4951 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4954 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4956 #define SPEC_in2_m2_16u 0
4958 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4961 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4963 #define SPEC_in2_m2_32s 0
4965 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4968 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4970 #define SPEC_in2_m2_32u 0
4972 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4975 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4977 #define SPEC_in2_m2_64 0
4979 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4982 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4984 #define SPEC_in2_mri2_16u 0
4986 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4989 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4991 #define SPEC_in2_mri2_32s 0
4993 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4996 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4998 #define SPEC_in2_mri2_32u 0
5000 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5003 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5005 #define SPEC_in2_mri2_64 0
/* IN2 = the signed immediate i2 as a constant.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* IN2 = immediate i2 truncated to an unsigned 8-bit value.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

/* IN2 = immediate i2 truncated to an unsigned 16-bit value.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

/* IN2 = immediate i2 truncated to an unsigned 32-bit value.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* IN2 = unsigned 16-bit immediate shifted left by the per-insn amount
   stored in the insn table's data field.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* IN2 = unsigned 32-bit immediate shifted left by the per-insn amount
   stored in the insn table's data field.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
5045 #ifndef CONFIG_USER_ONLY
/* IN2 = the raw undecoded instruction image (system-mode only).  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
5053 /* ====================================================================== */
5055 /* Find opc within the table of insns. This is formulated as a switch
5056 statement so that (1) we get compile-time notice of cut-paste errors
5057 for duplicated opcodes, and (2) the compiler generates the binary
5058 search tree, rather than us having to post-process the table. */
5060 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5061 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5063 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5065 enum DisasInsnEnum
{
5066 #include "insn-data.def"
5070 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5074 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5076 .help_in1 = in1_##I1, \
5077 .help_in2 = in2_##I2, \
5078 .help_prep = prep_##P, \
5079 .help_wout = wout_##W, \
5080 .help_cout = cout_##CC, \
5081 .help_op = op_##OP, \
5085 /* Allow 0 to be used for NULL in the table below. */
5093 #define SPEC_in1_0 0
5094 #define SPEC_in2_0 0
5095 #define SPEC_prep_0 0
5096 #define SPEC_wout_0 0
5098 static const DisasInsn insn_info
[] = {
5099 #include "insn-data.def"
5103 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5104 case OPC: return &insn_info[insn_ ## NM];
5106 static const DisasInsn
*lookup_opc(uint16_t opc
)
5109 #include "insn-data.def"
5118 /* Extract a field from the insn. The INSN should be left-aligned in
5119 the uint64_t so that we can more easily utilize the big-bit-endian
5120 definitions we extract from the Principals of Operation. */
5122 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5130 /* Zero extract the field from the insn. */
5131 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5133 /* Sign-extend, or un-swap the field as necessary. */
5135 case 0: /* unsigned */
5137 case 1: /* signed */
5138 assert(f
->size
<= 32);
5139 m
= 1u << (f
->size
- 1);
5142 case 2: /* dl+dh split, signed 20 bit. */
5143 r
= ((int8_t)r
<< 12) | (r
>> 8);
5149 /* Validate that the "compressed" encoding we selected above is valid.
5150 I.e. we havn't make two different original fields overlap. */
5151 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5152 o
->presentC
|= 1 << f
->indexC
;
5153 o
->presentO
|= 1 << f
->indexO
;
5155 o
->c
[f
->indexC
] = r
;
5158 /* Lookup the insn at the current PC, extracting the operands into O and
5159 returning the info struct for the insn. Returns NULL for invalid insn. */
5161 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5164 uint64_t insn
, pc
= s
->pc
;
5166 const DisasInsn
*info
;
5168 insn
= ld_code2(env
, pc
);
5169 op
= (insn
>> 8) & 0xff;
5170 ilen
= get_ilen(op
);
5171 s
->next_pc
= s
->pc
+ ilen
;
5178 insn
= ld_code4(env
, pc
) << 32;
5181 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5187 /* We can't actually determine the insn format until we've looked up
5188 the full insn opcode. Which we can't do without locating the
5189 secondary opcode. Assume by default that OP2 is at bit 40; for
5190 those smaller insns that don't actually have a secondary opcode
5191 this will correctly result in OP2 = 0. */
5197 case 0xb2: /* S, RRF, RRE */
5198 case 0xb3: /* RRE, RRD, RRF */
5199 case 0xb9: /* RRE, RRF */
5200 case 0xe5: /* SSE, SIL */
5201 op2
= (insn
<< 8) >> 56;
5205 case 0xc0: /* RIL */
5206 case 0xc2: /* RIL */
5207 case 0xc4: /* RIL */
5208 case 0xc6: /* RIL */
5209 case 0xc8: /* SSF */
5210 case 0xcc: /* RIL */
5211 op2
= (insn
<< 12) >> 60;
5213 case 0xd0 ... 0xdf: /* SS */
5219 case 0xee ... 0xf3: /* SS */
5220 case 0xf8 ... 0xfd: /* SS */
5224 op2
= (insn
<< 40) >> 56;
5228 memset(f
, 0, sizeof(*f
));
5233 /* Lookup the instruction. */
5234 info
= lookup_opc(op
<< 8 | op2
);
5236 /* If we found it, extract the operands. */
5238 DisasFormat fmt
= info
->fmt
;
5241 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5242 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5248 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5250 const DisasInsn
*insn
;
5251 ExitStatus ret
= NO_EXIT
;
5255 /* Search for the insn in the table. */
5256 insn
= extract_insn(env
, s
, &f
);
5258 /* Not found means unimplemented/illegal opcode. */
5260 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5262 gen_illegal_opcode(s
);
5263 return EXIT_NORETURN
;
5266 #ifndef CONFIG_USER_ONLY
5267 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5268 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5269 gen_helper_per_ifetch(cpu_env
, addr
);
5270 tcg_temp_free_i64(addr
);
5274 /* Check for insn specification exceptions. */
5276 int spec
= insn
->spec
, excp
= 0, r
;
5278 if (spec
& SPEC_r1_even
) {
5279 r
= get_field(&f
, r1
);
5281 excp
= PGM_SPECIFICATION
;
5284 if (spec
& SPEC_r2_even
) {
5285 r
= get_field(&f
, r2
);
5287 excp
= PGM_SPECIFICATION
;
5290 if (spec
& SPEC_r3_even
) {
5291 r
= get_field(&f
, r3
);
5293 excp
= PGM_SPECIFICATION
;
5296 if (spec
& SPEC_r1_f128
) {
5297 r
= get_field(&f
, r1
);
5299 excp
= PGM_SPECIFICATION
;
5302 if (spec
& SPEC_r2_f128
) {
5303 r
= get_field(&f
, r2
);
5305 excp
= PGM_SPECIFICATION
;
5309 gen_program_exception(s
, excp
);
5310 return EXIT_NORETURN
;
5314 /* Set up the strutures we use to communicate with the helpers. */
5317 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5318 TCGV_UNUSED_I64(o
.out
);
5319 TCGV_UNUSED_I64(o
.out2
);
5320 TCGV_UNUSED_I64(o
.in1
);
5321 TCGV_UNUSED_I64(o
.in2
);
5322 TCGV_UNUSED_I64(o
.addr1
);
5324 /* Implement the instruction. */
5325 if (insn
->help_in1
) {
5326 insn
->help_in1(s
, &f
, &o
);
5328 if (insn
->help_in2
) {
5329 insn
->help_in2(s
, &f
, &o
);
5331 if (insn
->help_prep
) {
5332 insn
->help_prep(s
, &f
, &o
);
5334 if (insn
->help_op
) {
5335 ret
= insn
->help_op(s
, &o
);
5337 if (insn
->help_wout
) {
5338 insn
->help_wout(s
, &f
, &o
);
5340 if (insn
->help_cout
) {
5341 insn
->help_cout(s
, &o
);
5344 /* Free any temporaries created by the helpers. */
5345 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5346 tcg_temp_free_i64(o
.out
);
5348 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5349 tcg_temp_free_i64(o
.out2
);
5351 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5352 tcg_temp_free_i64(o
.in1
);
5354 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5355 tcg_temp_free_i64(o
.in2
);
5357 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5358 tcg_temp_free_i64(o
.addr1
);
5361 #ifndef CONFIG_USER_ONLY
5362 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5363 /* An exception might be triggered, save PSW if not already done. */
5364 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5365 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5371 /* Call the helper to check for a possible PER exception. */
5372 gen_helper_per_check_exception(cpu_env
);
5376 /* Advance to the next instruction. */
5381 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5383 S390CPU
*cpu
= s390_env_get_cpu(env
);
5384 CPUState
*cs
= CPU(cpu
);
5386 target_ulong pc_start
;
5387 uint64_t next_page_start
;
5388 int num_insns
, max_insns
;
5395 if (!(tb
->flags
& FLAG_MASK_64
)) {
5396 pc_start
&= 0x7fffffff;
5401 dc
.cc_op
= CC_OP_DYNAMIC
;
5402 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5404 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5407 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5408 if (max_insns
== 0) {
5409 max_insns
= CF_COUNT_MASK
;
5411 if (max_insns
> TCG_MAX_INSNS
) {
5412 max_insns
= TCG_MAX_INSNS
;
5418 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5421 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5422 status
= EXIT_PC_STALE
;
5424 /* The address covered by the breakpoint must be included in
5425 [tb->pc, tb->pc + tb->size) in order to for it to be
5426 properly cleared -- thus we increment the PC here so that
5427 the logic setting tb->size below does the right thing. */
5432 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5437 if (status
== NO_EXIT
) {
5438 status
= translate_one(env
, &dc
);
5441 /* If we reach a page boundary, are single stepping,
5442 or exhaust instruction count, stop generation. */
5443 if (status
== NO_EXIT
5444 && (dc
.pc
>= next_page_start
5445 || tcg_op_buf_full()
5446 || num_insns
>= max_insns
5448 || cs
->singlestep_enabled
)) {
5449 status
= EXIT_PC_STALE
;
5451 } while (status
== NO_EXIT
);
5453 if (tb
->cflags
& CF_LAST_IO
) {
5462 update_psw_addr(&dc
);
5464 case EXIT_PC_UPDATED
:
5465 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5466 cc op type is in env */
5468 /* Exit the TB, either by raising a debug exception or by return. */
5470 gen_exception(EXCP_DEBUG
);
5471 } else if (use_exit_tb(&dc
)) {
5474 tcg_gen_lookup_and_goto_ptr(psw_addr
);
5481 gen_tb_end(tb
, num_insns
);
5483 tb
->size
= dc
.pc
- pc_start
;
5484 tb
->icount
= num_insns
;
5486 #if defined(S390X_DEBUG_DISAS)
5487 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5488 && qemu_log_in_addr_range(pc_start
)) {
5490 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5491 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5498 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5501 int cc_op
= data
[1];
5502 env
->psw
.addr
= data
[0];
5503 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {