4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
36 #include "qemu/host-utils.h"
37 #include "exec/cpu_ldst.h"
39 /* global register indexes */
40 static TCGv_ptr cpu_env
;
42 #include "exec/gen-icount.h"
43 #include "exec/helper-proto.h"
44 #include "exec/helper-gen.h"
46 #include "trace-tcg.h"
49 /* Information that (most) every instruction needs to manipulate. */
50 typedef struct DisasContext DisasContext
;
51 typedef struct DisasInsn DisasInsn
;
52 typedef struct DisasFields DisasFields
;
55 struct TranslationBlock
*tb
;
56 const DisasInsn
*insn
;
60 bool singlestep_enabled
;
63 /* Information carried about a condition to be evaluated. */
70 struct { TCGv_i64 a
, b
; } s64
;
71 struct { TCGv_i32 a
, b
; } s32
;
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
82 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
84 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
85 if (s
->tb
->flags
& FLAG_MASK_32
) {
86 return pc
| 0x80000000;
92 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
95 S390CPU
*cpu
= S390_CPU(cs
);
96 CPUS390XState
*env
= &cpu
->env
;
100 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
101 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
104 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
107 for (i
= 0; i
< 16; i
++) {
108 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
110 cpu_fprintf(f
, "\n");
116 for (i
= 0; i
< 16; i
++) {
117 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
119 cpu_fprintf(f
, "\n");
125 for (i
= 0; i
< 32; i
++) {
126 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
127 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
128 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
131 #ifndef CONFIG_USER_ONLY
132 for (i
= 0; i
< 16; i
++) {
133 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
135 cpu_fprintf(f
, "\n");
142 #ifdef DEBUG_INLINE_BRANCHES
143 for (i
= 0; i
< CC_OP_MAX
; i
++) {
144 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
145 inline_branch_miss
[i
], inline_branch_hit
[i
]);
149 cpu_fprintf(f
, "\n");
152 static TCGv_i64 psw_addr
;
153 static TCGv_i64 psw_mask
;
154 static TCGv_i64 gbea
;
156 static TCGv_i32 cc_op
;
157 static TCGv_i64 cc_src
;
158 static TCGv_i64 cc_dst
;
159 static TCGv_i64 cc_vr
;
161 static char cpu_reg_names
[32][4];
162 static TCGv_i64 regs
[16];
163 static TCGv_i64 fregs
[16];
165 void s390x_translate_init(void)
169 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
170 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
171 offsetof(CPUS390XState
, psw
.addr
),
173 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
174 offsetof(CPUS390XState
, psw
.mask
),
176 gbea
= tcg_global_mem_new_i64(TCG_AREG0
,
177 offsetof(CPUS390XState
, gbea
),
180 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
182 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
184 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
186 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
189 for (i
= 0; i
< 16; i
++) {
190 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
191 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
192 offsetof(CPUS390XState
, regs
[i
]),
196 for (i
= 0; i
< 16; i
++) {
197 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
198 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
199 offsetof(CPUS390XState
, vregs
[i
][0].d
),
200 cpu_reg_names
[i
+ 16]);
204 static TCGv_i64
load_reg(int reg
)
206 TCGv_i64 r
= tcg_temp_new_i64();
207 tcg_gen_mov_i64(r
, regs
[reg
]);
211 static TCGv_i64
load_freg32_i64(int reg
)
213 TCGv_i64 r
= tcg_temp_new_i64();
214 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
218 static void store_reg(int reg
, TCGv_i64 v
)
220 tcg_gen_mov_i64(regs
[reg
], v
);
223 static void store_freg(int reg
, TCGv_i64 v
)
225 tcg_gen_mov_i64(fregs
[reg
], v
);
228 static void store_reg32_i64(int reg
, TCGv_i64 v
)
230 /* 32 bit register writes keep the upper half */
231 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
234 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
236 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
239 static void store_freg32_i64(int reg
, TCGv_i64 v
)
241 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
244 static void return_low128(TCGv_i64 dest
)
246 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
249 static void update_psw_addr(DisasContext
*s
)
252 tcg_gen_movi_i64(psw_addr
, s
->pc
);
255 static void per_branch(DisasContext
*s
, bool to_next
)
257 #ifndef CONFIG_USER_ONLY
258 tcg_gen_movi_i64(gbea
, s
->pc
);
260 if (s
->tb
->flags
& FLAG_MASK_PER
) {
261 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
262 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
264 tcg_temp_free_i64(next_pc
);
270 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
271 TCGv_i64 arg1
, TCGv_i64 arg2
)
273 #ifndef CONFIG_USER_ONLY
274 if (s
->tb
->flags
& FLAG_MASK_PER
) {
275 TCGLabel
*lab
= gen_new_label();
276 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
278 tcg_gen_movi_i64(gbea
, s
->pc
);
279 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
283 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
284 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
285 tcg_temp_free_i64(pc
);
290 static void per_breaking_event(DisasContext
*s
)
292 tcg_gen_movi_i64(gbea
, s
->pc
);
295 static void update_cc_op(DisasContext
*s
)
297 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
298 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
302 static void potential_page_fault(DisasContext
*s
)
308 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
310 return (uint64_t)cpu_lduw_code(env
, pc
);
313 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
315 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
318 static int get_mem_index(DisasContext
*s
)
320 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
321 case PSW_ASC_PRIMARY
>> 32:
323 case PSW_ASC_SECONDARY
>> 32:
325 case PSW_ASC_HOME
>> 32:
333 static void gen_exception(int excp
)
335 TCGv_i32 tmp
= tcg_const_i32(excp
);
336 gen_helper_exception(cpu_env
, tmp
);
337 tcg_temp_free_i32(tmp
);
340 static void gen_program_exception(DisasContext
*s
, int code
)
344 /* Remember what pgm exeption this was. */
345 tmp
= tcg_const_i32(code
);
346 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
347 tcg_temp_free_i32(tmp
);
349 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
350 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
351 tcg_temp_free_i32(tmp
);
353 /* Advance past instruction. */
360 /* Trigger exception. */
361 gen_exception(EXCP_PGM
);
364 static inline void gen_illegal_opcode(DisasContext
*s
)
366 gen_program_exception(s
, PGM_OPERATION
);
369 static inline void gen_trap(DisasContext
*s
)
373 /* Set DXC to 0xff. */
374 t
= tcg_temp_new_i32();
375 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
376 tcg_gen_ori_i32(t
, t
, 0xff00);
377 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
378 tcg_temp_free_i32(t
);
380 gen_program_exception(s
, PGM_DATA
);
383 #ifndef CONFIG_USER_ONLY
384 static void check_privileged(DisasContext
*s
)
386 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
387 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
394 TCGv_i64 tmp
= tcg_temp_new_i64();
395 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
397 /* Note that d2 is limited to 20 bits, signed. If we crop negative
398 displacements early we create larger immedate addends. */
400 /* Note that addi optimizes the imm==0 case. */
402 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
403 tcg_gen_addi_i64(tmp
, tmp
, d2
);
405 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
407 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
413 tcg_gen_movi_i64(tmp
, d2
);
416 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
422 static inline bool live_cc_data(DisasContext
*s
)
424 return (s
->cc_op
!= CC_OP_DYNAMIC
425 && s
->cc_op
!= CC_OP_STATIC
429 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
431 if (live_cc_data(s
)) {
432 tcg_gen_discard_i64(cc_src
);
433 tcg_gen_discard_i64(cc_dst
);
434 tcg_gen_discard_i64(cc_vr
);
436 s
->cc_op
= CC_OP_CONST0
+ val
;
439 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
441 if (live_cc_data(s
)) {
442 tcg_gen_discard_i64(cc_src
);
443 tcg_gen_discard_i64(cc_vr
);
445 tcg_gen_mov_i64(cc_dst
, dst
);
449 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
452 if (live_cc_data(s
)) {
453 tcg_gen_discard_i64(cc_vr
);
455 tcg_gen_mov_i64(cc_src
, src
);
456 tcg_gen_mov_i64(cc_dst
, dst
);
460 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
461 TCGv_i64 dst
, TCGv_i64 vr
)
463 tcg_gen_mov_i64(cc_src
, src
);
464 tcg_gen_mov_i64(cc_dst
, dst
);
465 tcg_gen_mov_i64(cc_vr
, vr
);
469 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
471 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
474 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
476 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
479 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
481 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
484 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
486 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
489 /* CC value is in env->cc_op */
490 static void set_cc_static(DisasContext
*s
)
492 if (live_cc_data(s
)) {
493 tcg_gen_discard_i64(cc_src
);
494 tcg_gen_discard_i64(cc_dst
);
495 tcg_gen_discard_i64(cc_vr
);
497 s
->cc_op
= CC_OP_STATIC
;
500 /* calculates cc into cc_op */
501 static void gen_op_calc_cc(DisasContext
*s
)
503 TCGv_i32 local_cc_op
;
506 TCGV_UNUSED_I32(local_cc_op
);
507 TCGV_UNUSED_I64(dummy
);
510 dummy
= tcg_const_i64(0);
524 local_cc_op
= tcg_const_i32(s
->cc_op
);
540 /* s->cc_op is the cc value */
541 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
544 /* env->cc_op already is the cc value */
559 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
564 case CC_OP_LTUGTU_32
:
565 case CC_OP_LTUGTU_64
:
572 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
587 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
590 /* unknown operation - assume 3 arguments and cc_op in env */
591 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
597 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
598 tcg_temp_free_i32(local_cc_op
);
600 if (!TCGV_IS_UNUSED_I64(dummy
)) {
601 tcg_temp_free_i64(dummy
);
604 /* We now have cc in cc_op as constant */
608 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
610 /* NOTE: we handle the case where the TB spans two pages here */
611 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
612 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
613 && !s
->singlestep_enabled
614 && !(s
->tb
->cflags
& CF_LAST_IO
)
615 && !(s
->tb
->flags
& FLAG_MASK_PER
));
618 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
620 #ifdef DEBUG_INLINE_BRANCHES
621 inline_branch_miss
[cc_op
]++;
625 static void account_inline_branch(DisasContext
*s
, int cc_op
)
627 #ifdef DEBUG_INLINE_BRANCHES
628 inline_branch_hit
[cc_op
]++;
632 /* Table of mask values to comparison codes, given a comparison as input.
633 For such, CC=3 should not be possible. */
634 static const TCGCond ltgt_cond
[16] = {
635 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
636 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
637 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
638 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
639 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
640 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
641 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
642 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
645 /* Table of mask values to comparison codes, given a logic op as input.
646 For such, only CC=0 and CC=1 should be possible. */
647 static const TCGCond nz_cond
[16] = {
648 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
649 TCG_COND_NEVER
, TCG_COND_NEVER
,
650 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
651 TCG_COND_NE
, TCG_COND_NE
,
652 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
653 TCG_COND_EQ
, TCG_COND_EQ
,
654 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
655 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
658 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
659 details required to generate a TCG comparison. */
660 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
663 enum cc_op old_cc_op
= s
->cc_op
;
665 if (mask
== 15 || mask
== 0) {
666 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
669 c
->g1
= c
->g2
= true;
674 /* Find the TCG condition for the mask + cc op. */
680 cond
= ltgt_cond
[mask
];
681 if (cond
== TCG_COND_NEVER
) {
684 account_inline_branch(s
, old_cc_op
);
687 case CC_OP_LTUGTU_32
:
688 case CC_OP_LTUGTU_64
:
689 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
690 if (cond
== TCG_COND_NEVER
) {
693 account_inline_branch(s
, old_cc_op
);
697 cond
= nz_cond
[mask
];
698 if (cond
== TCG_COND_NEVER
) {
701 account_inline_branch(s
, old_cc_op
);
716 account_inline_branch(s
, old_cc_op
);
731 account_inline_branch(s
, old_cc_op
);
735 switch (mask
& 0xa) {
736 case 8: /* src == 0 -> no one bit found */
739 case 2: /* src != 0 -> one bit found */
745 account_inline_branch(s
, old_cc_op
);
751 case 8 | 2: /* vr == 0 */
754 case 4 | 1: /* vr != 0 */
757 case 8 | 4: /* no carry -> vr >= src */
760 case 2 | 1: /* carry -> vr < src */
766 account_inline_branch(s
, old_cc_op
);
771 /* Note that CC=0 is impossible; treat it as dont-care. */
773 case 2: /* zero -> op1 == op2 */
776 case 4 | 1: /* !zero -> op1 != op2 */
779 case 4: /* borrow (!carry) -> op1 < op2 */
782 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
788 account_inline_branch(s
, old_cc_op
);
793 /* Calculate cc value. */
798 /* Jump based on CC. We'll load up the real cond below;
799 the assignment here merely avoids a compiler warning. */
800 account_noninline_branch(s
, old_cc_op
);
801 old_cc_op
= CC_OP_STATIC
;
802 cond
= TCG_COND_NEVER
;
806 /* Load up the arguments of the comparison. */
808 c
->g1
= c
->g2
= false;
812 c
->u
.s32
.a
= tcg_temp_new_i32();
813 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
814 c
->u
.s32
.b
= tcg_const_i32(0);
817 case CC_OP_LTUGTU_32
:
820 c
->u
.s32
.a
= tcg_temp_new_i32();
821 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
822 c
->u
.s32
.b
= tcg_temp_new_i32();
823 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
830 c
->u
.s64
.b
= tcg_const_i64(0);
834 case CC_OP_LTUGTU_64
:
838 c
->g1
= c
->g2
= true;
844 c
->u
.s64
.a
= tcg_temp_new_i64();
845 c
->u
.s64
.b
= tcg_const_i64(0);
846 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
851 c
->u
.s32
.a
= tcg_temp_new_i32();
852 c
->u
.s32
.b
= tcg_temp_new_i32();
853 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
854 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
855 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
857 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
864 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
865 c
->u
.s64
.b
= tcg_const_i64(0);
877 case 0x8 | 0x4 | 0x2: /* cc != 3 */
879 c
->u
.s32
.b
= tcg_const_i32(3);
881 case 0x8 | 0x4 | 0x1: /* cc != 2 */
883 c
->u
.s32
.b
= tcg_const_i32(2);
885 case 0x8 | 0x2 | 0x1: /* cc != 1 */
887 c
->u
.s32
.b
= tcg_const_i32(1);
889 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
892 c
->u
.s32
.a
= tcg_temp_new_i32();
893 c
->u
.s32
.b
= tcg_const_i32(0);
894 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
896 case 0x8 | 0x4: /* cc < 2 */
898 c
->u
.s32
.b
= tcg_const_i32(2);
900 case 0x8: /* cc == 0 */
902 c
->u
.s32
.b
= tcg_const_i32(0);
904 case 0x4 | 0x2 | 0x1: /* cc != 0 */
906 c
->u
.s32
.b
= tcg_const_i32(0);
908 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
911 c
->u
.s32
.a
= tcg_temp_new_i32();
912 c
->u
.s32
.b
= tcg_const_i32(0);
913 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
915 case 0x4: /* cc == 1 */
917 c
->u
.s32
.b
= tcg_const_i32(1);
919 case 0x2 | 0x1: /* cc > 1 */
921 c
->u
.s32
.b
= tcg_const_i32(1);
923 case 0x2: /* cc == 2 */
925 c
->u
.s32
.b
= tcg_const_i32(2);
927 case 0x1: /* cc == 3 */
929 c
->u
.s32
.b
= tcg_const_i32(3);
932 /* CC is masked by something else: (8 >> cc) & mask. */
935 c
->u
.s32
.a
= tcg_const_i32(8);
936 c
->u
.s32
.b
= tcg_const_i32(0);
937 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
938 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
949 static void free_compare(DisasCompare
*c
)
953 tcg_temp_free_i64(c
->u
.s64
.a
);
955 tcg_temp_free_i32(c
->u
.s32
.a
);
960 tcg_temp_free_i64(c
->u
.s64
.b
);
962 tcg_temp_free_i32(c
->u
.s32
.b
);
967 /* ====================================================================== */
968 /* Define the insn format enumeration. */
969 #define F0(N) FMT_##N,
970 #define F1(N, X1) F0(N)
971 #define F2(N, X1, X2) F0(N)
972 #define F3(N, X1, X2, X3) F0(N)
973 #define F4(N, X1, X2, X3, X4) F0(N)
974 #define F5(N, X1, X2, X3, X4, X5) F0(N)
977 #include "insn-format.def"
987 /* Define a structure to hold the decoded fields. We'll store each inside
988 an array indexed by an enum. In order to conserve memory, we'll arrange
989 for fields that do not exist at the same time to overlap, thus the "C"
990 for compact. For checking purposes there is an "O" for original index
991 as well that will be applied to availability bitmaps. */
993 enum DisasFieldIndexO
{
1016 enum DisasFieldIndexC
{
1047 struct DisasFields
{
1051 unsigned presentC
:16;
1052 unsigned int presentO
;
1056 /* This is the way fields are to be accessed out of DisasFields. */
1057 #define have_field(S, F) have_field1((S), FLD_O_##F)
1058 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1060 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1062 return (f
->presentO
>> c
) & 1;
1065 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1066 enum DisasFieldIndexC c
)
1068 assert(have_field1(f
, o
));
1072 /* Describe the layout of each field in each format. */
1073 typedef struct DisasField
{
1075 unsigned int size
:8;
1076 unsigned int type
:2;
1077 unsigned int indexC
:6;
1078 enum DisasFieldIndexO indexO
:8;
1081 typedef struct DisasFormatInfo
{
1082 DisasField op
[NUM_C_FIELD
];
1085 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1086 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1087 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1088 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1089 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1090 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1091 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1092 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1093 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1094 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1095 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1096 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1097 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1098 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1100 #define F0(N) { { } },
1101 #define F1(N, X1) { { X1 } },
1102 #define F2(N, X1, X2) { { X1, X2 } },
1103 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1104 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1105 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1107 static const DisasFormatInfo format_info
[] = {
1108 #include "insn-format.def"
1126 /* Generally, we'll extract operands into this structures, operate upon
1127 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1128 of routines below for more details. */
1130 bool g_out
, g_out2
, g_in1
, g_in2
;
1131 TCGv_i64 out
, out2
, in1
, in2
;
1135 /* Instructions can place constraints on their operands, raising specification
1136 exceptions if they are violated. To make this easy to automate, each "in1",
1137 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1138 of the following, or 0. To make this easy to document, we'll put the
1139 SPEC_<name> defines next to <name>. */
1141 #define SPEC_r1_even 1
1142 #define SPEC_r2_even 2
1143 #define SPEC_r3_even 4
1144 #define SPEC_r1_f128 8
1145 #define SPEC_r2_f128 16
1147 /* Return values from translate_one, indicating the state of the TB. */
1149 /* Continue the TB. */
1151 /* We have emitted one or more goto_tb. No fixup required. */
1153 /* We are not using a goto_tb (for whatever reason), but have updated
1154 the PC (for whatever reason), so there's no need to do it again on
1157 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1158 updated the PC for the next instruction to be executed. */
1160 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1161 No following code will be executed. */
1165 typedef enum DisasFacility
{
1166 FAC_Z
, /* zarch (default) */
1167 FAC_CASS
, /* compare and swap and store */
1168 FAC_CASS2
, /* compare and swap and store 2*/
1169 FAC_DFP
, /* decimal floating point */
1170 FAC_DFPR
, /* decimal floating point rounding */
1171 FAC_DO
, /* distinct operands */
1172 FAC_EE
, /* execute extensions */
1173 FAC_EI
, /* extended immediate */
1174 FAC_FPE
, /* floating point extension */
1175 FAC_FPSSH
, /* floating point support sign handling */
1176 FAC_FPRGR
, /* FPR-GR transfer */
1177 FAC_GIE
, /* general instructions extension */
1178 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1179 FAC_HW
, /* high-word */
1180 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1181 FAC_MIE
, /* miscellaneous-instruction-extensions */
1182 FAC_LAT
, /* load-and-trap */
1183 FAC_LOC
, /* load/store on condition */
1184 FAC_LD
, /* long displacement */
1185 FAC_PC
, /* population count */
1186 FAC_SCF
, /* store clock fast */
1187 FAC_SFLE
, /* store facility list extended */
1188 FAC_ILA
, /* interlocked access facility 1 */
1194 DisasFacility fac
:8;
1199 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1200 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1201 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1202 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1203 void (*help_cout
)(DisasContext
*, DisasOps
*);
1204 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1209 /* ====================================================================== */
1210 /* Miscellaneous helpers, used by several operations. */
1212 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1213 DisasOps
*o
, int mask
)
1215 int b2
= get_field(f
, b2
);
1216 int d2
= get_field(f
, d2
);
1219 o
->in2
= tcg_const_i64(d2
& mask
);
1221 o
->in2
= get_address(s
, 0, b2
, d2
);
1222 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1226 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1228 if (dest
== s
->next_pc
) {
1229 per_branch(s
, true);
1232 if (use_goto_tb(s
, dest
)) {
1234 per_breaking_event(s
);
1236 tcg_gen_movi_i64(psw_addr
, dest
);
1237 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1238 return EXIT_GOTO_TB
;
1240 tcg_gen_movi_i64(psw_addr
, dest
);
1241 per_branch(s
, false);
1242 return EXIT_PC_UPDATED
;
1246 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1247 bool is_imm
, int imm
, TCGv_i64 cdest
)
1250 uint64_t dest
= s
->pc
+ 2 * imm
;
1253 /* Take care of the special cases first. */
1254 if (c
->cond
== TCG_COND_NEVER
) {
1259 if (dest
== s
->next_pc
) {
1260 /* Branch to next. */
1261 per_branch(s
, true);
1265 if (c
->cond
== TCG_COND_ALWAYS
) {
1266 ret
= help_goto_direct(s
, dest
);
1270 if (TCGV_IS_UNUSED_I64(cdest
)) {
1271 /* E.g. bcr %r0 -> no branch. */
1275 if (c
->cond
== TCG_COND_ALWAYS
) {
1276 tcg_gen_mov_i64(psw_addr
, cdest
);
1277 per_branch(s
, false);
1278 ret
= EXIT_PC_UPDATED
;
1283 if (use_goto_tb(s
, s
->next_pc
)) {
1284 if (is_imm
&& use_goto_tb(s
, dest
)) {
1285 /* Both exits can use goto_tb. */
1288 lab
= gen_new_label();
1290 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1292 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1295 /* Branch not taken. */
1297 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1298 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1302 per_breaking_event(s
);
1304 tcg_gen_movi_i64(psw_addr
, dest
);
1305 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1309 /* Fallthru can use goto_tb, but taken branch cannot. */
1310 /* Store taken branch destination before the brcond. This
1311 avoids having to allocate a new local temp to hold it.
1312 We'll overwrite this in the not taken case anyway. */
1314 tcg_gen_mov_i64(psw_addr
, cdest
);
1317 lab
= gen_new_label();
1319 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1321 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1324 /* Branch not taken. */
1327 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1328 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1332 tcg_gen_movi_i64(psw_addr
, dest
);
1334 per_breaking_event(s
);
1335 ret
= EXIT_PC_UPDATED
;
1338 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1339 Most commonly we're single-stepping or some other condition that
1340 disables all use of goto_tb. Just update the PC and exit. */
1342 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1344 cdest
= tcg_const_i64(dest
);
1348 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1350 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1352 TCGv_i32 t0
= tcg_temp_new_i32();
1353 TCGv_i64 t1
= tcg_temp_new_i64();
1354 TCGv_i64 z
= tcg_const_i64(0);
1355 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1356 tcg_gen_extu_i32_i64(t1
, t0
);
1357 tcg_temp_free_i32(t0
);
1358 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1359 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1360 tcg_temp_free_i64(t1
);
1361 tcg_temp_free_i64(z
);
1365 tcg_temp_free_i64(cdest
);
1367 tcg_temp_free_i64(next
);
1369 ret
= EXIT_PC_UPDATED
;
1377 /* ====================================================================== */
1378 /* The operations. These perform the bulk of the work for any insn,
1379 usually after the operands have been loaded and output initialized. */
1381 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1384 z
= tcg_const_i64(0);
1385 n
= tcg_temp_new_i64();
1386 tcg_gen_neg_i64(n
, o
->in2
);
1387 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1388 tcg_temp_free_i64(n
);
1389 tcg_temp_free_i64(z
);
1393 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1395 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1399 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1401 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1405 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1407 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1408 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1412 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1414 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1418 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1423 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1425 /* The carry flag is the msb of CC, therefore the branch mask that would
1426 create that comparison is 3. Feeding the generated comparison to
1427 setcond produces the carry flag that we desire. */
1428 disas_jcc(s
, &cmp
, 3);
1429 carry
= tcg_temp_new_i64();
1431 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1433 TCGv_i32 t
= tcg_temp_new_i32();
1434 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1435 tcg_gen_extu_i32_i64(carry
, t
);
1436 tcg_temp_free_i32(t
);
1440 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1441 tcg_temp_free_i64(carry
);
1445 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1447 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1451 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1453 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1457 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1459 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1460 return_low128(o
->out2
);
1464 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1466 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1470 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1472 int shift
= s
->insn
->data
& 0xff;
1473 int size
= s
->insn
->data
>> 8;
1474 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1477 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1478 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1479 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1481 /* Produce the CC from only the bits manipulated. */
1482 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1483 set_cc_nz_u64(s
, cc_dst
);
1487 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1489 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1490 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1491 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1492 per_branch(s
, false);
1493 return EXIT_PC_UPDATED
;
1499 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1501 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1502 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1505 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1507 int m1
= get_field(s
->fields
, m1
);
1508 bool is_imm
= have_field(s
->fields
, i2
);
1509 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1512 disas_jcc(s
, &c
, m1
);
1513 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1516 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1518 int r1
= get_field(s
->fields
, r1
);
1519 bool is_imm
= have_field(s
->fields
, i2
);
1520 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1524 c
.cond
= TCG_COND_NE
;
1529 t
= tcg_temp_new_i64();
1530 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1531 store_reg32_i64(r1
, t
);
1532 c
.u
.s32
.a
= tcg_temp_new_i32();
1533 c
.u
.s32
.b
= tcg_const_i32(0);
1534 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1535 tcg_temp_free_i64(t
);
1537 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1540 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1542 int r1
= get_field(s
->fields
, r1
);
1543 int imm
= get_field(s
->fields
, i2
);
1547 c
.cond
= TCG_COND_NE
;
1552 t
= tcg_temp_new_i64();
1553 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1554 tcg_gen_subi_i64(t
, t
, 1);
1555 store_reg32h_i64(r1
, t
);
1556 c
.u
.s32
.a
= tcg_temp_new_i32();
1557 c
.u
.s32
.b
= tcg_const_i32(0);
1558 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1559 tcg_temp_free_i64(t
);
1561 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1564 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1566 int r1
= get_field(s
->fields
, r1
);
1567 bool is_imm
= have_field(s
->fields
, i2
);
1568 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1571 c
.cond
= TCG_COND_NE
;
1576 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1577 c
.u
.s64
.a
= regs
[r1
];
1578 c
.u
.s64
.b
= tcg_const_i64(0);
1580 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1583 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1585 int r1
= get_field(s
->fields
, r1
);
1586 int r3
= get_field(s
->fields
, r3
);
1587 bool is_imm
= have_field(s
->fields
, i2
);
1588 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1592 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1597 t
= tcg_temp_new_i64();
1598 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1599 c
.u
.s32
.a
= tcg_temp_new_i32();
1600 c
.u
.s32
.b
= tcg_temp_new_i32();
1601 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1602 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1603 store_reg32_i64(r1
, t
);
1604 tcg_temp_free_i64(t
);
1606 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1609 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1611 int r1
= get_field(s
->fields
, r1
);
1612 int r3
= get_field(s
->fields
, r3
);
1613 bool is_imm
= have_field(s
->fields
, i2
);
1614 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1617 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1620 if (r1
== (r3
| 1)) {
1621 c
.u
.s64
.b
= load_reg(r3
| 1);
1624 c
.u
.s64
.b
= regs
[r3
| 1];
1628 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1629 c
.u
.s64
.a
= regs
[r1
];
1632 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1635 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1637 int imm
, m3
= get_field(s
->fields
, m3
);
1641 c
.cond
= ltgt_cond
[m3
];
1642 if (s
->insn
->data
) {
1643 c
.cond
= tcg_unsigned_cond(c
.cond
);
1645 c
.is_64
= c
.g1
= c
.g2
= true;
1649 is_imm
= have_field(s
->fields
, i4
);
1651 imm
= get_field(s
->fields
, i4
);
1654 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1655 get_field(s
->fields
, d4
));
1658 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1661 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1663 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1668 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1670 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1675 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1677 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1682 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1684 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1685 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1686 tcg_temp_free_i32(m3
);
1687 gen_set_cc_nz_f32(s
, o
->in2
);
1691 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1693 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1694 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1695 tcg_temp_free_i32(m3
);
1696 gen_set_cc_nz_f64(s
, o
->in2
);
1700 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1702 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1703 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1704 tcg_temp_free_i32(m3
);
1705 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1709 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1711 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1712 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1713 tcg_temp_free_i32(m3
);
1714 gen_set_cc_nz_f32(s
, o
->in2
);
1718 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1720 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1721 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1722 tcg_temp_free_i32(m3
);
1723 gen_set_cc_nz_f64(s
, o
->in2
);
1727 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1729 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1730 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1731 tcg_temp_free_i32(m3
);
1732 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1736 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1738 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1739 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1740 tcg_temp_free_i32(m3
);
1741 gen_set_cc_nz_f32(s
, o
->in2
);
1745 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1747 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1748 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1749 tcg_temp_free_i32(m3
);
1750 gen_set_cc_nz_f64(s
, o
->in2
);
1754 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1756 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1757 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1758 tcg_temp_free_i32(m3
);
1759 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1763 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1765 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1766 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1767 tcg_temp_free_i32(m3
);
1768 gen_set_cc_nz_f32(s
, o
->in2
);
1772 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1774 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1775 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1776 tcg_temp_free_i32(m3
);
1777 gen_set_cc_nz_f64(s
, o
->in2
);
1781 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1783 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1784 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1785 tcg_temp_free_i32(m3
);
1786 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1790 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1792 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1793 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1794 tcg_temp_free_i32(m3
);
1798 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1800 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1801 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1802 tcg_temp_free_i32(m3
);
1806 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1808 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1809 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1810 tcg_temp_free_i32(m3
);
1811 return_low128(o
->out2
);
1815 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1817 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1818 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1819 tcg_temp_free_i32(m3
);
1823 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1825 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1826 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1827 tcg_temp_free_i32(m3
);
1831 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1833 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1834 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1835 tcg_temp_free_i32(m3
);
1836 return_low128(o
->out2
);
1840 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1842 int r2
= get_field(s
->fields
, r2
);
1843 TCGv_i64 len
= tcg_temp_new_i64();
1845 potential_page_fault(s
);
1846 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1848 return_low128(o
->out
);
1850 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1851 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1852 tcg_temp_free_i64(len
);
1857 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1859 int l
= get_field(s
->fields
, l1
);
1864 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1865 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1868 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1869 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1872 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1873 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1876 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1877 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1880 potential_page_fault(s
);
1881 vl
= tcg_const_i32(l
);
1882 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1883 tcg_temp_free_i32(vl
);
1887 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1891 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1893 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1894 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1895 potential_page_fault(s
);
1896 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1897 tcg_temp_free_i32(r1
);
1898 tcg_temp_free_i32(r3
);
1903 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1905 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1906 TCGv_i32 t1
= tcg_temp_new_i32();
1907 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1908 potential_page_fault(s
);
1909 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1911 tcg_temp_free_i32(t1
);
1912 tcg_temp_free_i32(m3
);
1916 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1918 potential_page_fault(s
);
1919 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1921 return_low128(o
->in2
);
1925 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1927 TCGv_i64 t
= tcg_temp_new_i64();
1928 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1929 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1930 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1931 tcg_temp_free_i64(t
);
1935 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1937 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1938 int d2
= get_field(s
->fields
, d2
);
1939 int b2
= get_field(s
->fields
, b2
);
1940 int is_64
= s
->insn
->data
;
1941 TCGv_i64 addr
, mem
, cc
, z
;
1943 /* Note that in1 = R3 (new value) and
1944 in2 = (zero-extended) R1 (expected value). */
1946 /* Load the memory into the (temporary) output. While the PoO only talks
1947 about moving the memory to R1 on inequality, if we include equality it
1948 means that R1 is equal to the memory in all conditions. */
1949 addr
= get_address(s
, 0, b2
, d2
);
1951 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1953 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1956 /* Are the memory and expected values (un)equal? Note that this setcond
1957 produces the output CC value, thus the NE sense of the test. */
1958 cc
= tcg_temp_new_i64();
1959 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1961 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1962 Recall that we are allowed to unconditionally issue the store (and
1963 thus any possible write trap), so (re-)store the original contents
1964 of MEM in case of inequality. */
1965 z
= tcg_const_i64(0);
1966 mem
= tcg_temp_new_i64();
1967 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1969 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1971 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1973 tcg_temp_free_i64(z
);
1974 tcg_temp_free_i64(mem
);
1975 tcg_temp_free_i64(addr
);
1977 /* Store CC back to cc_op. Wait until after the store so that any
1978 exception gets the old cc_op value. */
1979 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1980 tcg_temp_free_i64(cc
);
1985 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1987 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1988 int r1
= get_field(s
->fields
, r1
);
1989 int r3
= get_field(s
->fields
, r3
);
1990 int d2
= get_field(s
->fields
, d2
);
1991 int b2
= get_field(s
->fields
, b2
);
1992 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
1994 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1996 addrh
= get_address(s
, 0, b2
, d2
);
1997 addrl
= get_address(s
, 0, b2
, d2
+ 8);
1998 outh
= tcg_temp_new_i64();
1999 outl
= tcg_temp_new_i64();
2001 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
2002 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
2004 /* Fold the double-word compare with arithmetic. */
2005 cc
= tcg_temp_new_i64();
2006 z
= tcg_temp_new_i64();
2007 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
2008 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
2009 tcg_gen_or_i64(cc
, cc
, z
);
2010 tcg_gen_movi_i64(z
, 0);
2011 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
2013 memh
= tcg_temp_new_i64();
2014 meml
= tcg_temp_new_i64();
2015 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
2016 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
2017 tcg_temp_free_i64(z
);
2019 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
2020 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
2021 tcg_temp_free_i64(memh
);
2022 tcg_temp_free_i64(meml
);
2023 tcg_temp_free_i64(addrh
);
2024 tcg_temp_free_i64(addrl
);
2026 /* Save back state now that we've passed all exceptions. */
2027 tcg_gen_mov_i64(regs
[r1
], outh
);
2028 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
2029 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2030 tcg_temp_free_i64(outh
);
2031 tcg_temp_free_i64(outl
);
2032 tcg_temp_free_i64(cc
);
2037 #ifndef CONFIG_USER_ONLY
2038 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2040 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2041 check_privileged(s
);
2042 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
2043 tcg_temp_free_i32(r1
);
2049 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2051 TCGv_i64 t1
= tcg_temp_new_i64();
2052 TCGv_i32 t2
= tcg_temp_new_i32();
2053 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2054 gen_helper_cvd(t1
, t2
);
2055 tcg_temp_free_i32(t2
);
2056 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2057 tcg_temp_free_i64(t1
);
2061 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2063 int m3
= get_field(s
->fields
, m3
);
2064 TCGLabel
*lab
= gen_new_label();
2067 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2068 if (s
->insn
->data
) {
2069 c
= tcg_unsigned_cond(c
);
2071 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2080 #ifndef CONFIG_USER_ONLY
2081 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2083 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2084 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2085 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2087 check_privileged(s
);
2091 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2093 tcg_temp_free_i32(func_code
);
2094 tcg_temp_free_i32(r3
);
2095 tcg_temp_free_i32(r1
);
2100 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2102 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2103 return_low128(o
->out
);
2107 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2109 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2110 return_low128(o
->out
);
2114 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2116 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2117 return_low128(o
->out
);
2121 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2123 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2124 return_low128(o
->out
);
2128 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2130 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2134 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2136 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2140 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2142 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2143 return_low128(o
->out2
);
2147 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2149 int r2
= get_field(s
->fields
, r2
);
2150 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2154 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2156 /* No cache information provided. */
2157 tcg_gen_movi_i64(o
->out
, -1);
2161 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2163 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2167 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2169 int r1
= get_field(s
->fields
, r1
);
2170 int r2
= get_field(s
->fields
, r2
);
2171 TCGv_i64 t
= tcg_temp_new_i64();
2173 /* Note the "subsequently" in the PoO, which implies a defined result
2174 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2175 tcg_gen_shri_i64(t
, psw_mask
, 32);
2176 store_reg32_i64(r1
, t
);
2178 store_reg32_i64(r2
, psw_mask
);
2181 tcg_temp_free_i64(t
);
2185 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2187 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2188 tb->flags, (ab)use the tb->cs_base field as the address of
2189 the template in memory, and grab 8 bits of tb->flags/cflags for
2190 the contents of the register. We would then recognize all this
2191 in gen_intermediate_code_internal, generating code for exactly
2192 one instruction. This new TB then gets executed normally.
2194 On the other hand, this seems to be mostly used for modifying
2195 MVC inside of memcpy, which needs a helper call anyway. So
2196 perhaps this doesn't bear thinking about any further. */
2203 tmp
= tcg_const_i64(s
->next_pc
);
2204 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2205 tcg_temp_free_i64(tmp
);
2210 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2212 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2213 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2214 tcg_temp_free_i32(m3
);
2218 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2220 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2221 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2222 tcg_temp_free_i32(m3
);
2226 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2228 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2229 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2230 return_low128(o
->out2
);
2231 tcg_temp_free_i32(m3
);
2235 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2237 /* We'll use the original input for cc computation, since we get to
2238 compare that against 0, which ought to be better than comparing
2239 the real output against 64. It also lets cc_dst be a convenient
2240 temporary during our computation. */
2241 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2243 /* R1 = IN ? CLZ(IN) : 64. */
2244 gen_helper_clz(o
->out
, o
->in2
);
2246 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2247 value by 64, which is undefined. But since the shift is 64 iff the
2248 input is zero, we still get the correct result after and'ing. */
2249 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2250 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2251 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2255 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2257 int m3
= get_field(s
->fields
, m3
);
2258 int pos
, len
, base
= s
->insn
->data
;
2259 TCGv_i64 tmp
= tcg_temp_new_i64();
2264 /* Effectively a 32-bit load. */
2265 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2272 /* Effectively a 16-bit load. */
2273 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2281 /* Effectively an 8-bit load. */
2282 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2287 pos
= base
+ ctz32(m3
) * 8;
2288 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2289 ccm
= ((1ull << len
) - 1) << pos
;
2293 /* This is going to be a sequence of loads and inserts. */
2294 pos
= base
+ 32 - 8;
2298 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2299 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2300 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2303 m3
= (m3
<< 1) & 0xf;
2309 tcg_gen_movi_i64(tmp
, ccm
);
2310 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2311 tcg_temp_free_i64(tmp
);
2315 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2317 int shift
= s
->insn
->data
& 0xff;
2318 int size
= s
->insn
->data
>> 8;
2319 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2323 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2328 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2330 t1
= tcg_temp_new_i64();
2331 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2332 tcg_gen_shri_i64(t1
, t1
, 36);
2333 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2335 tcg_gen_extu_i32_i64(t1
, cc_op
);
2336 tcg_gen_shli_i64(t1
, t1
, 28);
2337 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2338 tcg_temp_free_i64(t1
);
2342 #ifndef CONFIG_USER_ONLY
2343 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2345 check_privileged(s
);
2346 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2350 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2352 check_privileged(s
);
2353 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2358 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2360 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2364 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2366 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2370 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2372 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2376 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2378 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2382 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2384 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2385 return_low128(o
->out2
);
2389 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2391 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2392 return_low128(o
->out2
);
2396 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2398 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2402 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2404 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2408 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2410 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2414 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2416 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2420 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2422 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2426 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2428 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2432 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2434 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2438 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2440 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2444 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2446 TCGLabel
*lab
= gen_new_label();
2447 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2448 /* The value is stored even in case of trap. */
2449 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2455 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2457 TCGLabel
*lab
= gen_new_label();
2458 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2459 /* The value is stored even in case of trap. */
2460 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2466 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2468 TCGLabel
*lab
= gen_new_label();
2469 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2470 /* The value is stored even in case of trap. */
2471 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2477 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2479 TCGLabel
*lab
= gen_new_label();
2480 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2481 /* The value is stored even in case of trap. */
2482 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2488 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2490 TCGLabel
*lab
= gen_new_label();
2491 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2492 /* The value is stored even in case of trap. */
2493 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2499 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2503 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2506 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2510 TCGv_i32 t32
= tcg_temp_new_i32();
2513 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2516 t
= tcg_temp_new_i64();
2517 tcg_gen_extu_i32_i64(t
, t32
);
2518 tcg_temp_free_i32(t32
);
2520 z
= tcg_const_i64(0);
2521 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2522 tcg_temp_free_i64(t
);
2523 tcg_temp_free_i64(z
);
2529 #ifndef CONFIG_USER_ONLY
2530 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2532 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2533 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2534 check_privileged(s
);
2535 potential_page_fault(s
);
2536 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2537 tcg_temp_free_i32(r1
);
2538 tcg_temp_free_i32(r3
);
2542 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2544 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2545 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2546 check_privileged(s
);
2547 potential_page_fault(s
);
2548 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2549 tcg_temp_free_i32(r1
);
2550 tcg_temp_free_i32(r3
);
2553 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2555 check_privileged(s
);
2556 potential_page_fault(s
);
2557 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2562 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2566 check_privileged(s
);
2567 per_breaking_event(s
);
2569 t1
= tcg_temp_new_i64();
2570 t2
= tcg_temp_new_i64();
2571 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2572 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2573 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2574 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2575 tcg_gen_shli_i64(t1
, t1
, 32);
2576 gen_helper_load_psw(cpu_env
, t1
, t2
);
2577 tcg_temp_free_i64(t1
);
2578 tcg_temp_free_i64(t2
);
2579 return EXIT_NORETURN
;
2582 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2586 check_privileged(s
);
2587 per_breaking_event(s
);
2589 t1
= tcg_temp_new_i64();
2590 t2
= tcg_temp_new_i64();
2591 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2592 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2593 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2594 gen_helper_load_psw(cpu_env
, t1
, t2
);
2595 tcg_temp_free_i64(t1
);
2596 tcg_temp_free_i64(t2
);
2597 return EXIT_NORETURN
;
2601 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2603 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2604 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2605 potential_page_fault(s
);
2606 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2607 tcg_temp_free_i32(r1
);
2608 tcg_temp_free_i32(r3
);
2612 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2614 int r1
= get_field(s
->fields
, r1
);
2615 int r3
= get_field(s
->fields
, r3
);
2618 /* Only one register to read. */
2619 t1
= tcg_temp_new_i64();
2620 if (unlikely(r1
== r3
)) {
2621 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2622 store_reg32_i64(r1
, t1
);
2627 /* First load the values of the first and last registers to trigger
2628 possible page faults. */
2629 t2
= tcg_temp_new_i64();
2630 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2631 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2632 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2633 store_reg32_i64(r1
, t1
);
2634 store_reg32_i64(r3
, t2
);
2636 /* Only two registers to read. */
2637 if (((r1
+ 1) & 15) == r3
) {
2643 /* Then load the remaining registers. Page fault can't occur. */
2645 tcg_gen_movi_i64(t2
, 4);
2648 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2649 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2650 store_reg32_i64(r1
, t1
);
2658 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2660 int r1
= get_field(s
->fields
, r1
);
2661 int r3
= get_field(s
->fields
, r3
);
2664 /* Only one register to read. */
2665 t1
= tcg_temp_new_i64();
2666 if (unlikely(r1
== r3
)) {
2667 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2668 store_reg32h_i64(r1
, t1
);
2673 /* First load the values of the first and last registers to trigger
2674 possible page faults. */
2675 t2
= tcg_temp_new_i64();
2676 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2677 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2678 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2679 store_reg32h_i64(r1
, t1
);
2680 store_reg32h_i64(r3
, t2
);
2682 /* Only two registers to read. */
2683 if (((r1
+ 1) & 15) == r3
) {
2689 /* Then load the remaining registers. Page fault can't occur. */
2691 tcg_gen_movi_i64(t2
, 4);
2694 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2695 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2696 store_reg32h_i64(r1
, t1
);
2704 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2706 int r1
= get_field(s
->fields
, r1
);
2707 int r3
= get_field(s
->fields
, r3
);
2710 /* Only one register to read. */
2711 if (unlikely(r1
== r3
)) {
2712 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2716 /* First load the values of the first and last registers to trigger
2717 possible page faults. */
2718 t1
= tcg_temp_new_i64();
2719 t2
= tcg_temp_new_i64();
2720 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2721 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2722 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2723 tcg_gen_mov_i64(regs
[r1
], t1
);
2726 /* Only two registers to read. */
2727 if (((r1
+ 1) & 15) == r3
) {
2732 /* Then load the remaining registers. Page fault can't occur. */
2734 tcg_gen_movi_i64(t1
, 8);
2737 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2738 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2745 #ifndef CONFIG_USER_ONLY
2746 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2748 check_privileged(s
);
2749 potential_page_fault(s
);
2750 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2754 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2756 check_privileged(s
);
2757 potential_page_fault(s
);
2758 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2763 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2766 o
->g_out
= o
->g_in2
;
2767 TCGV_UNUSED_I64(o
->in2
);
2772 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2774 int b2
= get_field(s
->fields
, b2
);
2775 TCGv ar1
= tcg_temp_new_i64();
2778 o
->g_out
= o
->g_in2
;
2779 TCGV_UNUSED_I64(o
->in2
);
2782 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2783 case PSW_ASC_PRIMARY
>> 32:
2784 tcg_gen_movi_i64(ar1
, 0);
2786 case PSW_ASC_ACCREG
>> 32:
2787 tcg_gen_movi_i64(ar1
, 1);
2789 case PSW_ASC_SECONDARY
>> 32:
2791 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2793 tcg_gen_movi_i64(ar1
, 0);
2796 case PSW_ASC_HOME
>> 32:
2797 tcg_gen_movi_i64(ar1
, 2);
2801 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2802 tcg_temp_free_i64(ar1
);
2807 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2811 o
->g_out
= o
->g_in1
;
2812 o
->g_out2
= o
->g_in2
;
2813 TCGV_UNUSED_I64(o
->in1
);
2814 TCGV_UNUSED_I64(o
->in2
);
2815 o
->g_in1
= o
->g_in2
= false;
2819 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2821 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2822 potential_page_fault(s
);
2823 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2824 tcg_temp_free_i32(l
);
2828 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2830 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2831 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2832 potential_page_fault(s
);
2833 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2834 tcg_temp_free_i32(r1
);
2835 tcg_temp_free_i32(r2
);
2840 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2842 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2843 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2844 potential_page_fault(s
);
2845 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2846 tcg_temp_free_i32(r1
);
2847 tcg_temp_free_i32(r3
);
2852 #ifndef CONFIG_USER_ONLY
2853 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2855 int r1
= get_field(s
->fields
, l1
);
2856 check_privileged(s
);
2857 potential_page_fault(s
);
2858 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2863 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2865 int r1
= get_field(s
->fields
, l1
);
2866 check_privileged(s
);
2867 potential_page_fault(s
);
2868 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2874 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2876 potential_page_fault(s
);
2877 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2882 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2884 potential_page_fault(s
);
2885 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2887 return_low128(o
->in2
);
2891 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2893 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2897 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2899 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2903 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2905 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2909 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2911 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2915 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2917 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2921 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2923 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2924 return_low128(o
->out2
);
2928 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2930 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2931 return_low128(o
->out2
);
2935 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2937 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2938 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2939 tcg_temp_free_i64(r3
);
2943 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2945 int r3
= get_field(s
->fields
, r3
);
2946 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2950 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2952 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2953 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2954 tcg_temp_free_i64(r3
);
2958 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2960 int r3
= get_field(s
->fields
, r3
);
2961 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2965 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2968 z
= tcg_const_i64(0);
2969 n
= tcg_temp_new_i64();
2970 tcg_gen_neg_i64(n
, o
->in2
);
2971 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
2972 tcg_temp_free_i64(n
);
2973 tcg_temp_free_i64(z
);
2977 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2979 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2983 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2985 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2989 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2991 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2992 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2996 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2998 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2999 potential_page_fault(s
);
3000 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3001 tcg_temp_free_i32(l
);
3006 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3008 tcg_gen_neg_i64(o
->out
, o
->in2
);
3012 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3014 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3018 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3020 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3024 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3026 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3027 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3031 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3033 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3034 potential_page_fault(s
);
3035 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3036 tcg_temp_free_i32(l
);
3041 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3043 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3047 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3049 int shift
= s
->insn
->data
& 0xff;
3050 int size
= s
->insn
->data
>> 8;
3051 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3054 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3055 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3057 /* Produce the CC from only the bits manipulated. */
3058 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3059 set_cc_nz_u64(s
, cc_dst
);
3063 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3065 gen_helper_popcnt(o
->out
, o
->in2
);
3069 #ifndef CONFIG_USER_ONLY
3070 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3072 check_privileged(s
);
3073 gen_helper_ptlb(cpu_env
);
3078 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3080 int i3
= get_field(s
->fields
, i3
);
3081 int i4
= get_field(s
->fields
, i4
);
3082 int i5
= get_field(s
->fields
, i5
);
3083 int do_zero
= i4
& 0x80;
3084 uint64_t mask
, imask
, pmask
;
3087 /* Adjust the arguments for the specific insn. */
3088 switch (s
->fields
->op2
) {
3089 case 0x55: /* risbg */
3094 case 0x5d: /* risbhg */
3097 pmask
= 0xffffffff00000000ull
;
3099 case 0x51: /* risblg */
3102 pmask
= 0x00000000ffffffffull
;
3108 /* MASK is the set of bits to be inserted from R2.
3109 Take care for I3/I4 wraparound. */
3112 mask
^= pmask
>> i4
>> 1;
3114 mask
|= ~(pmask
>> i4
>> 1);
3118 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3119 insns, we need to keep the other half of the register. */
3120 imask
= ~mask
| ~pmask
;
3122 if (s
->fields
->op2
== 0x55) {
3129 /* In some cases we can implement this with deposit, which can be more
3130 efficient on some hosts. */
3131 if (~mask
== imask
&& i3
<= i4
) {
3132 if (s
->fields
->op2
== 0x5d) {
3135 /* Note that we rotate the bits to be inserted to the lsb, not to
3136 the position as described in the PoO. */
3139 rot
= (i5
- pos
) & 63;
3145 /* Rotate the input as necessary. */
3146 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3148 /* Insert the selected bits into the output. */
3150 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3151 } else if (imask
== 0) {
3152 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3154 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3155 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3156 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3161 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3163 int i3
= get_field(s
->fields
, i3
);
3164 int i4
= get_field(s
->fields
, i4
);
3165 int i5
= get_field(s
->fields
, i5
);
3168 /* If this is a test-only form, arrange to discard the result. */
3170 o
->out
= tcg_temp_new_i64();
3178 /* MASK is the set of bits to be operated on from R2.
3179 Take care for I3/I4 wraparound. */
3182 mask
^= ~0ull >> i4
>> 1;
3184 mask
|= ~(~0ull >> i4
>> 1);
3187 /* Rotate the input as necessary. */
3188 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3191 switch (s
->fields
->op2
) {
3192 case 0x55: /* AND */
3193 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3194 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3197 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3198 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3200 case 0x57: /* XOR */
3201 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3202 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3209 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3210 set_cc_nz_u64(s
, cc_dst
);
3214 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3216 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3220 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3222 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3226 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3228 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3232 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3234 TCGv_i32 t1
= tcg_temp_new_i32();
3235 TCGv_i32 t2
= tcg_temp_new_i32();
3236 TCGv_i32 to
= tcg_temp_new_i32();
3237 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3238 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3239 tcg_gen_rotl_i32(to
, t1
, t2
);
3240 tcg_gen_extu_i32_i64(o
->out
, to
);
3241 tcg_temp_free_i32(t1
);
3242 tcg_temp_free_i32(t2
);
3243 tcg_temp_free_i32(to
);
3247 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3249 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3253 #ifndef CONFIG_USER_ONLY
3254 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3256 check_privileged(s
);
3257 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3262 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3264 check_privileged(s
);
3265 gen_helper_sacf(cpu_env
, o
->in2
);
3266 /* Addressing mode has changed, so end the block. */
3267 return EXIT_PC_STALE
;
3271 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3273 int sam
= s
->insn
->data
;
3289 /* Bizarre but true, we check the address of the current insn for the
3290 specification exception, not the next to be executed. Thus the PoO
3291 documents that Bad Things Happen two bytes before the end. */
3292 if (s
->pc
& ~mask
) {
3293 gen_program_exception(s
, PGM_SPECIFICATION
);
3294 return EXIT_NORETURN
;
3298 tsam
= tcg_const_i64(sam
);
3299 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3300 tcg_temp_free_i64(tsam
);
3302 /* Always exit the TB, since we (may have) changed execution mode. */
3303 return EXIT_PC_STALE
;
3306 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3308 int r1
= get_field(s
->fields
, r1
);
3309 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3313 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3315 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3319 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3321 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3325 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3327 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3328 return_low128(o
->out2
);
3332 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3334 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3338 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3340 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3344 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3346 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3347 return_low128(o
->out2
);
3351 #ifndef CONFIG_USER_ONLY
3352 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3354 check_privileged(s
);
3355 potential_page_fault(s
);
3356 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3361 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3363 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3364 check_privileged(s
);
3365 potential_page_fault(s
);
3366 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3367 tcg_temp_free_i32(r1
);
3372 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3379 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3381 /* We want to store when the condition is fulfilled, so branch
3382 out when it's not */
3383 c
.cond
= tcg_invert_cond(c
.cond
);
3385 lab
= gen_new_label();
3387 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3389 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3393 r1
= get_field(s
->fields
, r1
);
3394 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3395 if (s
->insn
->data
) {
3396 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3398 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3400 tcg_temp_free_i64(a
);
3406 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3408 uint64_t sign
= 1ull << s
->insn
->data
;
3409 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3410 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3411 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3412 /* The arithmetic left shift is curious in that it does not affect
3413 the sign bit. Copy that over from the source unchanged. */
3414 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3415 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3416 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3420 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3422 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3426 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3428 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3432 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3434 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3438 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3440 gen_helper_sfpc(cpu_env
, o
->in2
);
3444 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3446 gen_helper_sfas(cpu_env
, o
->in2
);
3450 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3452 int b2
= get_field(s
->fields
, b2
);
3453 int d2
= get_field(s
->fields
, d2
);
3454 TCGv_i64 t1
= tcg_temp_new_i64();
3455 TCGv_i64 t2
= tcg_temp_new_i64();
3458 switch (s
->fields
->op2
) {
3459 case 0x99: /* SRNM */
3462 case 0xb8: /* SRNMB */
3465 case 0xb9: /* SRNMT */
3471 mask
= (1 << len
) - 1;
3473 /* Insert the value into the appropriate field of the FPC. */
3475 tcg_gen_movi_i64(t1
, d2
& mask
);
3477 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3478 tcg_gen_andi_i64(t1
, t1
, mask
);
3480 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3481 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3482 tcg_temp_free_i64(t1
);
3484 /* Then install the new FPC to set the rounding mode in fpu_status. */
3485 gen_helper_sfpc(cpu_env
, t2
);
3486 tcg_temp_free_i64(t2
);
3490 #ifndef CONFIG_USER_ONLY
3491 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3493 check_privileged(s
);
3494 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3495 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3499 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3501 check_privileged(s
);
3502 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3506 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3508 check_privileged(s
);
3509 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3513 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3515 check_privileged(s
);
3516 /* ??? Surely cpu address != cpu number. In any case the previous
3517 version of this stored more than the required half-word, so it
3518 is unlikely this has ever been tested. */
3519 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3523 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3525 gen_helper_stck(o
->out
, cpu_env
);
3526 /* ??? We don't implement clock states. */
3527 gen_op_movi_cc(s
, 0);
3531 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3533 TCGv_i64 c1
= tcg_temp_new_i64();
3534 TCGv_i64 c2
= tcg_temp_new_i64();
3535 gen_helper_stck(c1
, cpu_env
);
3536 /* Shift the 64-bit value into its place as a zero-extended
3537 104-bit value. Note that "bit positions 64-103 are always
3538 non-zero so that they compare differently to STCK"; we set
3539 the least significant bit to 1. */
3540 tcg_gen_shli_i64(c2
, c1
, 56);
3541 tcg_gen_shri_i64(c1
, c1
, 8);
3542 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3543 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3544 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3545 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3546 tcg_temp_free_i64(c1
);
3547 tcg_temp_free_i64(c2
);
3548 /* ??? We don't implement clock states. */
3549 gen_op_movi_cc(s
, 0);
3553 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3555 check_privileged(s
);
3556 gen_helper_sckc(cpu_env
, o
->in2
);
3560 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3562 check_privileged(s
);
3563 gen_helper_stckc(o
->out
, cpu_env
);
3567 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3569 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3570 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3571 check_privileged(s
);
3572 potential_page_fault(s
);
3573 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3574 tcg_temp_free_i32(r1
);
3575 tcg_temp_free_i32(r3
);
3579 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3581 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3582 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3583 check_privileged(s
);
3584 potential_page_fault(s
);
3585 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3586 tcg_temp_free_i32(r1
);
3587 tcg_temp_free_i32(r3
);
3591 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3593 TCGv_i64 t1
= tcg_temp_new_i64();
3595 check_privileged(s
);
3596 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3597 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3598 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3599 tcg_temp_free_i64(t1
);
3604 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3606 check_privileged(s
);
3607 gen_helper_spt(cpu_env
, o
->in2
);
3611 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3614 /* We really ought to have more complete indication of facilities
3615 that we implement. Address this when STFLE is implemented. */
3616 check_privileged(s
);
3617 f
= tcg_const_i64(0xc0000000);
3618 a
= tcg_const_i64(200);
3619 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3620 tcg_temp_free_i64(f
);
3621 tcg_temp_free_i64(a
);
3625 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3627 check_privileged(s
);
3628 gen_helper_stpt(o
->out
, cpu_env
);
3632 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3634 check_privileged(s
);
3635 potential_page_fault(s
);
3636 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3641 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3643 check_privileged(s
);
3644 gen_helper_spx(cpu_env
, o
->in2
);
3648 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3650 check_privileged(s
);
3651 potential_page_fault(s
);
3652 gen_helper_xsch(cpu_env
, regs
[1]);
3657 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3659 check_privileged(s
);
3660 potential_page_fault(s
);
3661 gen_helper_csch(cpu_env
, regs
[1]);
3666 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3668 check_privileged(s
);
3669 potential_page_fault(s
);
3670 gen_helper_hsch(cpu_env
, regs
[1]);
3675 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3677 check_privileged(s
);
3678 potential_page_fault(s
);
3679 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3684 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3686 check_privileged(s
);
3687 potential_page_fault(s
);
3688 gen_helper_rchp(cpu_env
, regs
[1]);
3693 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3695 check_privileged(s
);
3696 potential_page_fault(s
);
3697 gen_helper_rsch(cpu_env
, regs
[1]);
3702 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3704 check_privileged(s
);
3705 potential_page_fault(s
);
3706 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
3711 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
3713 check_privileged(s
);
3714 potential_page_fault(s
);
3715 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
3720 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
3722 check_privileged(s
);
3723 potential_page_fault(s
);
3724 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
3729 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
3731 check_privileged(s
);
3732 potential_page_fault(s
);
3733 gen_helper_chsc(cpu_env
, o
->in2
);
3738 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3740 check_privileged(s
);
3741 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3742 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3746 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3748 uint64_t i2
= get_field(s
->fields
, i2
);
3751 check_privileged(s
);
3753 /* It is important to do what the instruction name says: STORE THEN.
3754 If we let the output hook perform the store then if we fault and
3755 restart, we'll have the wrong SYSTEM MASK in place. */
3756 t
= tcg_temp_new_i64();
3757 tcg_gen_shri_i64(t
, psw_mask
, 56);
3758 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3759 tcg_temp_free_i64(t
);
3761 if (s
->fields
->op
== 0xac) {
3762 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3763 (i2
<< 56) | 0x00ffffffffffffffull
);
3765 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3770 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3772 check_privileged(s
);
3773 potential_page_fault(s
);
3774 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3778 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3780 check_privileged(s
);
3781 potential_page_fault(s
);
3782 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3787 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3789 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3793 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3795 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3799 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3801 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3805 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3807 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3811 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3813 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3814 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3815 potential_page_fault(s
);
3816 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3817 tcg_temp_free_i32(r1
);
3818 tcg_temp_free_i32(r3
);
3822 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3824 int m3
= get_field(s
->fields
, m3
);
3825 int pos
, base
= s
->insn
->data
;
3826 TCGv_i64 tmp
= tcg_temp_new_i64();
3828 pos
= base
+ ctz32(m3
) * 8;
3831 /* Effectively a 32-bit store. */
3832 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3833 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3839 /* Effectively a 16-bit store. */
3840 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3841 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3848 /* Effectively an 8-bit store. */
3849 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3850 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3854 /* This is going to be a sequence of shifts and stores. */
3855 pos
= base
+ 32 - 8;
3858 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3859 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3860 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3862 m3
= (m3
<< 1) & 0xf;
3867 tcg_temp_free_i64(tmp
);
3871 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3873 int r1
= get_field(s
->fields
, r1
);
3874 int r3
= get_field(s
->fields
, r3
);
3875 int size
= s
->insn
->data
;
3876 TCGv_i64 tsize
= tcg_const_i64(size
);
3880 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3882 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3887 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3891 tcg_temp_free_i64(tsize
);
3895 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3897 int r1
= get_field(s
->fields
, r1
);
3898 int r3
= get_field(s
->fields
, r3
);
3899 TCGv_i64 t
= tcg_temp_new_i64();
3900 TCGv_i64 t4
= tcg_const_i64(4);
3901 TCGv_i64 t32
= tcg_const_i64(32);
3904 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3905 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3909 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3913 tcg_temp_free_i64(t
);
3914 tcg_temp_free_i64(t4
);
3915 tcg_temp_free_i64(t32
);
3919 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3921 potential_page_fault(s
);
3922 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3924 return_low128(o
->in2
);
3928 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3930 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3934 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3939 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3941 /* The !borrow flag is the msb of CC. Since we want the inverse of
3942 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3943 disas_jcc(s
, &cmp
, 8 | 4);
3944 borrow
= tcg_temp_new_i64();
3946 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3948 TCGv_i32 t
= tcg_temp_new_i32();
3949 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3950 tcg_gen_extu_i32_i64(borrow
, t
);
3951 tcg_temp_free_i32(t
);
3955 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3956 tcg_temp_free_i64(borrow
);
3960 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3967 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3968 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3969 tcg_temp_free_i32(t
);
3971 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3972 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3973 tcg_temp_free_i32(t
);
3975 gen_exception(EXCP_SVC
);
3976 return EXIT_NORETURN
;
3979 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3981 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3986 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3988 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3993 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3995 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
4000 #ifndef CONFIG_USER_ONLY
4001 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4003 potential_page_fault(s
);
4004 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4010 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4012 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4013 potential_page_fault(s
);
4014 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4015 tcg_temp_free_i32(l
);
4020 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4022 potential_page_fault(s
);
4023 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4024 return_low128(o
->out2
);
4029 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4031 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4032 potential_page_fault(s
);
4033 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4034 tcg_temp_free_i32(l
);
4039 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4041 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4042 potential_page_fault(s
);
4043 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4044 tcg_temp_free_i32(l
);
4048 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4050 int d1
= get_field(s
->fields
, d1
);
4051 int d2
= get_field(s
->fields
, d2
);
4052 int b1
= get_field(s
->fields
, b1
);
4053 int b2
= get_field(s
->fields
, b2
);
4054 int l
= get_field(s
->fields
, l1
);
4057 o
->addr1
= get_address(s
, 0, b1
, d1
);
4059 /* If the addresses are identical, this is a store/memset of zero. */
4060 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4061 o
->in2
= tcg_const_i64(0);
4065 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4068 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4072 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4075 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4079 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4082 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4086 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4088 gen_op_movi_cc(s
, 0);
4092 /* But in general we'll defer to a helper. */
4093 o
->in2
= get_address(s
, 0, b2
, d2
);
4094 t32
= tcg_const_i32(l
);
4095 potential_page_fault(s
);
4096 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4097 tcg_temp_free_i32(t32
);
4102 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4104 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4108 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4110 int shift
= s
->insn
->data
& 0xff;
4111 int size
= s
->insn
->data
>> 8;
4112 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4115 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4116 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4118 /* Produce the CC from only the bits manipulated. */
4119 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4120 set_cc_nz_u64(s
, cc_dst
);
4124 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4126 o
->out
= tcg_const_i64(0);
4130 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4132 o
->out
= tcg_const_i64(0);
4138 /* ====================================================================== */
4139 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4140 the original inputs), update the various cc data structures in order to
4141 be able to compute the new condition code. */
4143 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4145 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4148 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4150 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4153 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4155 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4158 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4160 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4163 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4165 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4168 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4170 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4173 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4175 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4178 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4180 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4183 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4185 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4188 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4190 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4193 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4195 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4198 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4200 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4203 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4205 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4208 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4210 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4213 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4215 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4218 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4220 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4223 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4225 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4228 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4230 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4233 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4235 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4238 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4240 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4241 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4244 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4246 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4249 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4251 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4254 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4256 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4259 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4261 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4264 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4266 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4269 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4271 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4274 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4276 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4279 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4281 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4284 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4286 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4289 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4291 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4294 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4296 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global. */
4305 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4307 o
->out
= tcg_temp_new_i64();
4309 #define SPEC_prep_new 0
4311 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4313 o
->out
= tcg_temp_new_i64();
4314 o
->out2
= tcg_temp_new_i64();
4316 #define SPEC_prep_new_P 0
4318 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4320 o
->out
= regs
[get_field(f
, r1
)];
4323 #define SPEC_prep_r1 0
4325 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4327 int r1
= get_field(f
, r1
);
4329 o
->out2
= regs
[r1
+ 1];
4330 o
->g_out
= o
->g_out2
= true;
4332 #define SPEC_prep_r1_P SPEC_r1_even
4334 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4336 o
->out
= fregs
[get_field(f
, r1
)];
4339 #define SPEC_prep_f1 0
4341 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4343 int r1
= get_field(f
, r1
);
4345 o
->out2
= fregs
[r1
+ 2];
4346 o
->g_out
= o
->g_out2
= true;
4348 #define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation. */
4356 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4358 store_reg(get_field(f
, r1
), o
->out
);
4360 #define SPEC_wout_r1 0
4362 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4364 int r1
= get_field(f
, r1
);
4365 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4367 #define SPEC_wout_r1_8 0
4369 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4371 int r1
= get_field(f
, r1
);
4372 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4374 #define SPEC_wout_r1_16 0
4376 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4378 store_reg32_i64(get_field(f
, r1
), o
->out
);
4380 #define SPEC_wout_r1_32 0
4382 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4384 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4386 #define SPEC_wout_r1_32h 0
4388 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4390 int r1
= get_field(f
, r1
);
4391 store_reg32_i64(r1
, o
->out
);
4392 store_reg32_i64(r1
+ 1, o
->out2
);
4394 #define SPEC_wout_r1_P32 SPEC_r1_even
4396 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4398 int r1
= get_field(f
, r1
);
4399 store_reg32_i64(r1
+ 1, o
->out
);
4400 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4401 store_reg32_i64(r1
, o
->out
);
4403 #define SPEC_wout_r1_D32 SPEC_r1_even
4405 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4407 store_freg32_i64(get_field(f
, r1
), o
->out
);
4409 #define SPEC_wout_e1 0
4411 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4413 store_freg(get_field(f
, r1
), o
->out
);
4415 #define SPEC_wout_f1 0
4417 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4419 int f1
= get_field(s
->fields
, r1
);
4420 store_freg(f1
, o
->out
);
4421 store_freg(f1
+ 2, o
->out2
);
4423 #define SPEC_wout_x1 SPEC_r1_f128
4425 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4427 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4428 store_reg32_i64(get_field(f
, r1
), o
->out
);
4431 #define SPEC_wout_cond_r1r2_32 0
4433 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4435 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4436 store_freg32_i64(get_field(f
, r1
), o
->out
);
4439 #define SPEC_wout_cond_e1e2 0
4441 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4443 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4445 #define SPEC_wout_m1_8 0
4447 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4449 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4451 #define SPEC_wout_m1_16 0
4453 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4455 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4457 #define SPEC_wout_m1_32 0
4459 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4461 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4463 #define SPEC_wout_m1_64 0
4465 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4467 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4469 #define SPEC_wout_m2_32 0
4471 static void wout_m2_32_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4473 /* XXX release reservation */
4474 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4475 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4477 #define SPEC_wout_m2_32_r1_atomic 0
4479 static void wout_m2_64_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4481 /* XXX release reservation */
4482 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4483 store_reg(get_field(f
, r1
), o
->in2
);
4485 #define SPEC_wout_m2_64_r1_atomic 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn. */
4490 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4492 o
->in1
= load_reg(get_field(f
, r1
));
4494 #define SPEC_in1_r1 0
4496 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4498 o
->in1
= regs
[get_field(f
, r1
)];
4501 #define SPEC_in1_r1_o 0
4503 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4505 o
->in1
= tcg_temp_new_i64();
4506 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4508 #define SPEC_in1_r1_32s 0
4510 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4512 o
->in1
= tcg_temp_new_i64();
4513 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4515 #define SPEC_in1_r1_32u 0
4517 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4519 o
->in1
= tcg_temp_new_i64();
4520 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4522 #define SPEC_in1_r1_sr32 0
4524 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4526 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4528 #define SPEC_in1_r1p1 SPEC_r1_even
4530 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4532 o
->in1
= tcg_temp_new_i64();
4533 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4535 #define SPEC_in1_r1p1_32s SPEC_r1_even
4537 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4539 o
->in1
= tcg_temp_new_i64();
4540 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4542 #define SPEC_in1_r1p1_32u SPEC_r1_even
4544 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4546 int r1
= get_field(f
, r1
);
4547 o
->in1
= tcg_temp_new_i64();
4548 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4550 #define SPEC_in1_r1_D32 SPEC_r1_even
4552 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4554 o
->in1
= load_reg(get_field(f
, r2
));
4556 #define SPEC_in1_r2 0
4558 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4560 o
->in1
= tcg_temp_new_i64();
4561 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4563 #define SPEC_in1_r2_sr32 0
4565 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4567 o
->in1
= load_reg(get_field(f
, r3
));
4569 #define SPEC_in1_r3 0
4571 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4573 o
->in1
= regs
[get_field(f
, r3
)];
4576 #define SPEC_in1_r3_o 0
4578 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4580 o
->in1
= tcg_temp_new_i64();
4581 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4583 #define SPEC_in1_r3_32s 0
4585 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4587 o
->in1
= tcg_temp_new_i64();
4588 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4590 #define SPEC_in1_r3_32u 0
4592 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4594 int r3
= get_field(f
, r3
);
4595 o
->in1
= tcg_temp_new_i64();
4596 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4598 #define SPEC_in1_r3_D32 SPEC_r3_even
4600 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4602 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4604 #define SPEC_in1_e1 0
4606 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4608 o
->in1
= fregs
[get_field(f
, r1
)];
4611 #define SPEC_in1_f1_o 0
4613 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4615 int r1
= get_field(f
, r1
);
4617 o
->out2
= fregs
[r1
+ 2];
4618 o
->g_out
= o
->g_out2
= true;
4620 #define SPEC_in1_x1_o SPEC_r1_f128
4622 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4624 o
->in1
= fregs
[get_field(f
, r3
)];
4627 #define SPEC_in1_f3_o 0
4629 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4631 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4633 #define SPEC_in1_la1 0
4635 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4637 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4638 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4640 #define SPEC_in1_la2 0
4642 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4645 o
->in1
= tcg_temp_new_i64();
4646 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4648 #define SPEC_in1_m1_8u 0
4650 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4653 o
->in1
= tcg_temp_new_i64();
4654 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4656 #define SPEC_in1_m1_16s 0
4658 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4661 o
->in1
= tcg_temp_new_i64();
4662 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4664 #define SPEC_in1_m1_16u 0
4666 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4669 o
->in1
= tcg_temp_new_i64();
4670 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4672 #define SPEC_in1_m1_32s 0
4674 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4677 o
->in1
= tcg_temp_new_i64();
4678 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4680 #define SPEC_in1_m1_32u 0
4682 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4685 o
->in1
= tcg_temp_new_i64();
4686 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4688 #define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn. */
4693 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4695 o
->in2
= regs
[get_field(f
, r1
)];
4698 #define SPEC_in2_r1_o 0
4700 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4702 o
->in2
= tcg_temp_new_i64();
4703 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4705 #define SPEC_in2_r1_16u 0
4707 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4709 o
->in2
= tcg_temp_new_i64();
4710 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4712 #define SPEC_in2_r1_32u 0
4714 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4716 int r1
= get_field(f
, r1
);
4717 o
->in2
= tcg_temp_new_i64();
4718 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4720 #define SPEC_in2_r1_D32 SPEC_r1_even
4722 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4724 o
->in2
= load_reg(get_field(f
, r2
));
4726 #define SPEC_in2_r2 0
4728 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4730 o
->in2
= regs
[get_field(f
, r2
)];
4733 #define SPEC_in2_r2_o 0
4735 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4737 int r2
= get_field(f
, r2
);
4739 o
->in2
= load_reg(r2
);
4742 #define SPEC_in2_r2_nz 0
4744 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4746 o
->in2
= tcg_temp_new_i64();
4747 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4749 #define SPEC_in2_r2_8s 0
4751 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4753 o
->in2
= tcg_temp_new_i64();
4754 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4756 #define SPEC_in2_r2_8u 0
4758 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4760 o
->in2
= tcg_temp_new_i64();
4761 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4763 #define SPEC_in2_r2_16s 0
4765 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4767 o
->in2
= tcg_temp_new_i64();
4768 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4770 #define SPEC_in2_r2_16u 0
4772 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4774 o
->in2
= load_reg(get_field(f
, r3
));
4776 #define SPEC_in2_r3 0
4778 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4780 o
->in2
= tcg_temp_new_i64();
4781 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
4783 #define SPEC_in2_r3_sr32 0
4785 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4787 o
->in2
= tcg_temp_new_i64();
4788 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4790 #define SPEC_in2_r2_32s 0
4792 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4794 o
->in2
= tcg_temp_new_i64();
4795 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4797 #define SPEC_in2_r2_32u 0
4799 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4801 o
->in2
= tcg_temp_new_i64();
4802 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
4804 #define SPEC_in2_r2_sr32 0
4806 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4808 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4810 #define SPEC_in2_e2 0
4812 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4814 o
->in2
= fregs
[get_field(f
, r2
)];
4817 #define SPEC_in2_f2_o 0
4819 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4821 int r2
= get_field(f
, r2
);
4823 o
->in2
= fregs
[r2
+ 2];
4824 o
->g_in1
= o
->g_in2
= true;
4826 #define SPEC_in2_x2_o SPEC_r2_f128
4828 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4830 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4832 #define SPEC_in2_ra2 0
4834 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4836 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4837 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4839 #define SPEC_in2_a2 0
4841 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4843 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4845 #define SPEC_in2_ri2 0
4847 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4849 help_l2_shift(s
, f
, o
, 31);
4851 #define SPEC_in2_sh32 0
4853 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4855 help_l2_shift(s
, f
, o
, 63);
4857 #define SPEC_in2_sh64 0
4859 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4862 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4864 #define SPEC_in2_m2_8u 0
4866 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4869 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4871 #define SPEC_in2_m2_16s 0
4873 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4876 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4878 #define SPEC_in2_m2_16u 0
4880 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4883 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4885 #define SPEC_in2_m2_32s 0
4887 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4890 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4892 #define SPEC_in2_m2_32u 0
4894 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4897 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4899 #define SPEC_in2_m2_64 0
4901 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4904 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4906 #define SPEC_in2_mri2_16u 0
4908 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4911 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4913 #define SPEC_in2_mri2_32s 0
4915 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4918 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4920 #define SPEC_in2_mri2_32u 0
4922 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4925 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4927 #define SPEC_in2_mri2_64 0
4929 static void in2_m2_32s_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4931 /* XXX should reserve the address */
4933 o
->in2
= tcg_temp_new_i64();
4934 tcg_gen_qemu_ld32s(o
->in2
, o
->addr1
, get_mem_index(s
));
4936 #define SPEC_in2_m2_32s_atomic 0
4938 static void in2_m2_64_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4940 /* XXX should reserve the address */
4942 o
->in2
= tcg_temp_new_i64();
4943 tcg_gen_qemu_ld64(o
->in2
, o
->addr1
, get_mem_index(s
));
4945 #define SPEC_in2_m2_64_atomic 0
4947 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4949 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4951 #define SPEC_in2_i2 0
4953 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4955 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4957 #define SPEC_in2_i2_8u 0
4959 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4961 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4963 #define SPEC_in2_i2_16u 0
4965 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4967 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4969 #define SPEC_in2_i2_32u 0
4971 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4973 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4974 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4976 #define SPEC_in2_i2_16u_shl 0
4978 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4980 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4981 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4983 #define SPEC_in2_i2_32u_shl 0
4985 #ifndef CONFIG_USER_ONLY
4986 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4988 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
4990 #define SPEC_in2_insn 0
4993 /* ====================================================================== */
4995 /* Find opc within the table of insns. This is formulated as a switch
4996 statement so that (1) we get compile-time notice of cut-paste errors
4997 for duplicated opcodes, and (2) the compiler generates the binary
4998 search tree, rather than us having to post-process the table. */
5000 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5001 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5003 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5005 enum DisasInsnEnum
{
5006 #include "insn-data.def"
5010 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5014 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5016 .help_in1 = in1_##I1, \
5017 .help_in2 = in2_##I2, \
5018 .help_prep = prep_##P, \
5019 .help_wout = wout_##W, \
5020 .help_cout = cout_##CC, \
5021 .help_op = op_##OP, \
5025 /* Allow 0 to be used for NULL in the table below. */
5033 #define SPEC_in1_0 0
5034 #define SPEC_in2_0 0
5035 #define SPEC_prep_0 0
5036 #define SPEC_wout_0 0
5038 static const DisasInsn insn_info
[] = {
5039 #include "insn-data.def"
5043 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5044 case OPC: return &insn_info[insn_ ## NM];
5046 static const DisasInsn
*lookup_opc(uint16_t opc
)
5049 #include "insn-data.def"
5058 /* Extract a field from the insn. The INSN should be left-aligned in
5059 the uint64_t so that we can more easily utilize the big-bit-endian
5060 definitions we extract from the Principals of Operation. */
5062 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5070 /* Zero extract the field from the insn. */
5071 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5073 /* Sign-extend, or un-swap the field as necessary. */
5075 case 0: /* unsigned */
5077 case 1: /* signed */
5078 assert(f
->size
<= 32);
5079 m
= 1u << (f
->size
- 1);
5082 case 2: /* dl+dh split, signed 20 bit. */
5083 r
= ((int8_t)r
<< 12) | (r
>> 8);
5089 /* Validate that the "compressed" encoding we selected above is valid.
5090 I.e. we havn't make two different original fields overlap. */
5091 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5092 o
->presentC
|= 1 << f
->indexC
;
5093 o
->presentO
|= 1 << f
->indexO
;
5095 o
->c
[f
->indexC
] = r
;
5098 /* Lookup the insn at the current PC, extracting the operands into O and
5099 returning the info struct for the insn. Returns NULL for invalid insn. */
5101 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5104 uint64_t insn
, pc
= s
->pc
;
5106 const DisasInsn
*info
;
5108 insn
= ld_code2(env
, pc
);
5109 op
= (insn
>> 8) & 0xff;
5110 ilen
= get_ilen(op
);
5111 s
->next_pc
= s
->pc
+ ilen
;
5118 insn
= ld_code4(env
, pc
) << 32;
5121 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5127 /* We can't actually determine the insn format until we've looked up
5128 the full insn opcode. Which we can't do without locating the
5129 secondary opcode. Assume by default that OP2 is at bit 40; for
5130 those smaller insns that don't actually have a secondary opcode
5131 this will correctly result in OP2 = 0. */
5137 case 0xb2: /* S, RRF, RRE */
5138 case 0xb3: /* RRE, RRD, RRF */
5139 case 0xb9: /* RRE, RRF */
5140 case 0xe5: /* SSE, SIL */
5141 op2
= (insn
<< 8) >> 56;
5145 case 0xc0: /* RIL */
5146 case 0xc2: /* RIL */
5147 case 0xc4: /* RIL */
5148 case 0xc6: /* RIL */
5149 case 0xc8: /* SSF */
5150 case 0xcc: /* RIL */
5151 op2
= (insn
<< 12) >> 60;
5153 case 0xd0 ... 0xdf: /* SS */
5159 case 0xee ... 0xf3: /* SS */
5160 case 0xf8 ... 0xfd: /* SS */
5164 op2
= (insn
<< 40) >> 56;
5168 memset(f
, 0, sizeof(*f
));
5173 /* Lookup the instruction. */
5174 info
= lookup_opc(op
<< 8 | op2
);
5176 /* If we found it, extract the operands. */
5178 DisasFormat fmt
= info
->fmt
;
5181 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5182 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5188 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5190 const DisasInsn
*insn
;
5191 ExitStatus ret
= NO_EXIT
;
5195 /* Search for the insn in the table. */
5196 insn
= extract_insn(env
, s
, &f
);
5198 /* Not found means unimplemented/illegal opcode. */
5200 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5202 gen_illegal_opcode(s
);
5203 return EXIT_NORETURN
;
5206 #ifndef CONFIG_USER_ONLY
5207 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5208 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5209 gen_helper_per_ifetch(cpu_env
, addr
);
5210 tcg_temp_free_i64(addr
);
5214 /* Check for insn specification exceptions. */
5216 int spec
= insn
->spec
, excp
= 0, r
;
5218 if (spec
& SPEC_r1_even
) {
5219 r
= get_field(&f
, r1
);
5221 excp
= PGM_SPECIFICATION
;
5224 if (spec
& SPEC_r2_even
) {
5225 r
= get_field(&f
, r2
);
5227 excp
= PGM_SPECIFICATION
;
5230 if (spec
& SPEC_r3_even
) {
5231 r
= get_field(&f
, r3
);
5233 excp
= PGM_SPECIFICATION
;
5236 if (spec
& SPEC_r1_f128
) {
5237 r
= get_field(&f
, r1
);
5239 excp
= PGM_SPECIFICATION
;
5242 if (spec
& SPEC_r2_f128
) {
5243 r
= get_field(&f
, r2
);
5245 excp
= PGM_SPECIFICATION
;
5249 gen_program_exception(s
, excp
);
5250 return EXIT_NORETURN
;
5254 /* Set up the strutures we use to communicate with the helpers. */
5257 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5258 TCGV_UNUSED_I64(o
.out
);
5259 TCGV_UNUSED_I64(o
.out2
);
5260 TCGV_UNUSED_I64(o
.in1
);
5261 TCGV_UNUSED_I64(o
.in2
);
5262 TCGV_UNUSED_I64(o
.addr1
);
5264 /* Implement the instruction. */
5265 if (insn
->help_in1
) {
5266 insn
->help_in1(s
, &f
, &o
);
5268 if (insn
->help_in2
) {
5269 insn
->help_in2(s
, &f
, &o
);
5271 if (insn
->help_prep
) {
5272 insn
->help_prep(s
, &f
, &o
);
5274 if (insn
->help_op
) {
5275 ret
= insn
->help_op(s
, &o
);
5277 if (insn
->help_wout
) {
5278 insn
->help_wout(s
, &f
, &o
);
5280 if (insn
->help_cout
) {
5281 insn
->help_cout(s
, &o
);
5284 /* Free any temporaries created by the helpers. */
5285 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5286 tcg_temp_free_i64(o
.out
);
5288 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5289 tcg_temp_free_i64(o
.out2
);
5291 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5292 tcg_temp_free_i64(o
.in1
);
5294 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5295 tcg_temp_free_i64(o
.in2
);
5297 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5298 tcg_temp_free_i64(o
.addr1
);
5301 #ifndef CONFIG_USER_ONLY
5302 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5303 /* An exception might be triggered, save PSW if not already done. */
5304 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5305 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5311 /* Call the helper to check for a possible PER exception. */
5312 gen_helper_per_check_exception(cpu_env
);
5316 /* Advance to the next instruction. */
5321 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5323 S390CPU
*cpu
= s390_env_get_cpu(env
);
5324 CPUState
*cs
= CPU(cpu
);
5326 target_ulong pc_start
;
5327 uint64_t next_page_start
;
5328 int num_insns
, max_insns
;
5335 if (!(tb
->flags
& FLAG_MASK_64
)) {
5336 pc_start
&= 0x7fffffff;
5341 dc
.cc_op
= CC_OP_DYNAMIC
;
5342 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5344 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5347 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5348 if (max_insns
== 0) {
5349 max_insns
= CF_COUNT_MASK
;
5351 if (max_insns
> TCG_MAX_INSNS
) {
5352 max_insns
= TCG_MAX_INSNS
;
5358 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5361 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5362 status
= EXIT_PC_STALE
;
5364 /* The address covered by the breakpoint must be included in
5365 [tb->pc, tb->pc + tb->size) in order to for it to be
5366 properly cleared -- thus we increment the PC here so that
5367 the logic setting tb->size below does the right thing. */
5372 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5377 if (status
== NO_EXIT
) {
5378 status
= translate_one(env
, &dc
);
5381 /* If we reach a page boundary, are single stepping,
5382 or exhaust instruction count, stop generation. */
5383 if (status
== NO_EXIT
5384 && (dc
.pc
>= next_page_start
5385 || tcg_op_buf_full()
5386 || num_insns
>= max_insns
5388 || cs
->singlestep_enabled
)) {
5389 status
= EXIT_PC_STALE
;
5391 } while (status
== NO_EXIT
);
5393 if (tb
->cflags
& CF_LAST_IO
) {
5402 update_psw_addr(&dc
);
5404 case EXIT_PC_UPDATED
:
5405 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5406 cc op type is in env */
5408 /* Exit the TB, either by raising a debug exception or by return. */
5410 gen_exception(EXCP_DEBUG
);
5419 gen_tb_end(tb
, num_insns
);
5421 tb
->size
= dc
.pc
- pc_start
;
5422 tb
->icount
= num_insns
;
5424 #if defined(S390X_DEBUG_DISAS)
5425 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5426 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5427 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5433 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5436 int cc_op
= data
[1];
5437 env
->psw
.addr
= data
[0];
5438 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {