4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env
;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
59 bool singlestep_enabled
;
62 /* Information carried about a condition to be evaluated. */
69 struct { TCGv_i64 a
, b
; } s64
;
70 struct { TCGv_i32 a
, b
; } s32
;
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit
[CC_OP_MAX
];
78 static uint64_t inline_branch_miss
[CC_OP_MAX
];
81 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
83 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
84 if (s
->tb
->flags
& FLAG_MASK_32
) {
85 return pc
| 0x80000000;
91 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
94 S390CPU
*cpu
= S390_CPU(cs
);
95 CPUS390XState
*env
= &cpu
->env
;
99 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
100 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
103 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
106 for (i
= 0; i
< 16; i
++) {
107 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
109 cpu_fprintf(f
, "\n");
115 for (i
= 0; i
< 16; i
++) {
116 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
118 cpu_fprintf(f
, "\n");
124 for (i
= 0; i
< 32; i
++) {
125 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
126 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
127 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
130 #ifndef CONFIG_USER_ONLY
131 for (i
= 0; i
< 16; i
++) {
132 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
134 cpu_fprintf(f
, "\n");
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i
= 0; i
< CC_OP_MAX
; i
++) {
143 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
144 inline_branch_miss
[i
], inline_branch_hit
[i
]);
148 cpu_fprintf(f
, "\n");
151 static TCGv_i64 psw_addr
;
152 static TCGv_i64 psw_mask
;
153 static TCGv_i64 gbea
;
155 static TCGv_i32 cc_op
;
156 static TCGv_i64 cc_src
;
157 static TCGv_i64 cc_dst
;
158 static TCGv_i64 cc_vr
;
160 static char cpu_reg_names
[32][4];
161 static TCGv_i64 regs
[16];
162 static TCGv_i64 fregs
[16];
164 void s390x_translate_init(void)
168 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
169 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
170 offsetof(CPUS390XState
, psw
.addr
),
172 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
173 offsetof(CPUS390XState
, psw
.mask
),
175 gbea
= tcg_global_mem_new_i64(TCG_AREG0
,
176 offsetof(CPUS390XState
, gbea
),
179 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
181 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
183 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
185 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
188 for (i
= 0; i
< 16; i
++) {
189 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
190 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
191 offsetof(CPUS390XState
, regs
[i
]),
195 for (i
= 0; i
< 16; i
++) {
196 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
197 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
198 offsetof(CPUS390XState
, vregs
[i
][0].d
),
199 cpu_reg_names
[i
+ 16]);
203 static TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static TCGv_i64
load_freg32_i64(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
217 static void store_reg(int reg
, TCGv_i64 v
)
219 tcg_gen_mov_i64(regs
[reg
], v
);
222 static void store_freg(int reg
, TCGv_i64 v
)
224 tcg_gen_mov_i64(fregs
[reg
], v
);
227 static void store_reg32_i64(int reg
, TCGv_i64 v
)
229 /* 32 bit register writes keep the upper half */
230 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
233 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
235 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
238 static void store_freg32_i64(int reg
, TCGv_i64 v
)
240 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
243 static void return_low128(TCGv_i64 dest
)
245 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
248 static void update_psw_addr(DisasContext
*s
)
251 tcg_gen_movi_i64(psw_addr
, s
->pc
);
254 static void per_branch(DisasContext
*s
, bool to_next
)
256 #ifndef CONFIG_USER_ONLY
257 tcg_gen_movi_i64(gbea
, s
->pc
);
259 if (s
->tb
->flags
& FLAG_MASK_PER
) {
260 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
261 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
263 tcg_temp_free_i64(next_pc
);
269 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
270 TCGv_i64 arg1
, TCGv_i64 arg2
)
272 #ifndef CONFIG_USER_ONLY
273 if (s
->tb
->flags
& FLAG_MASK_PER
) {
274 TCGLabel
*lab
= gen_new_label();
275 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
277 tcg_gen_movi_i64(gbea
, s
->pc
);
278 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
282 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
283 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
284 tcg_temp_free_i64(pc
);
289 static void per_breaking_event(DisasContext
*s
)
291 tcg_gen_movi_i64(gbea
, s
->pc
);
294 static void update_cc_op(DisasContext
*s
)
296 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
297 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
301 static void potential_page_fault(DisasContext
*s
)
307 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
309 return (uint64_t)cpu_lduw_code(env
, pc
);
312 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
314 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
317 static int get_mem_index(DisasContext
*s
)
319 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
320 case PSW_ASC_PRIMARY
>> 32:
322 case PSW_ASC_SECONDARY
>> 32:
324 case PSW_ASC_HOME
>> 32:
332 static void gen_exception(int excp
)
334 TCGv_i32 tmp
= tcg_const_i32(excp
);
335 gen_helper_exception(cpu_env
, tmp
);
336 tcg_temp_free_i32(tmp
);
339 static void gen_program_exception(DisasContext
*s
, int code
)
343 /* Remember what pgm exception this was. */
344 tmp
= tcg_const_i32(code
);
345 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
346 tcg_temp_free_i32(tmp
);
348 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
349 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
350 tcg_temp_free_i32(tmp
);
352 /* Advance past instruction. */
359 /* Trigger exception. */
360 gen_exception(EXCP_PGM
);
363 static inline void gen_illegal_opcode(DisasContext
*s
)
365 gen_program_exception(s
, PGM_OPERATION
);
368 static inline void gen_trap(DisasContext
*s
)
372 /* Set DXC to 0xff. */
373 t
= tcg_temp_new_i32();
374 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
375 tcg_gen_ori_i32(t
, t
, 0xff00);
376 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
377 tcg_temp_free_i32(t
);
379 gen_program_exception(s
, PGM_DATA
);
382 #ifndef CONFIG_USER_ONLY
383 static void check_privileged(DisasContext
*s
)
385 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
386 gen_program_exception(s
, PGM_PRIVILEGED
);
391 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
393 TCGv_i64 tmp
= tcg_temp_new_i64();
394 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
396 /* Note that d2 is limited to 20 bits, signed. If we crop negative
397 displacements early we create larger immediate addends. */
399 /* Note that addi optimizes the imm==0 case. */
401 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
402 tcg_gen_addi_i64(tmp
, tmp
, d2
);
404 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
406 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
412 tcg_gen_movi_i64(tmp
, d2
);
415 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
421 static inline bool live_cc_data(DisasContext
*s
)
423 return (s
->cc_op
!= CC_OP_DYNAMIC
424 && s
->cc_op
!= CC_OP_STATIC
428 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
430 if (live_cc_data(s
)) {
431 tcg_gen_discard_i64(cc_src
);
432 tcg_gen_discard_i64(cc_dst
);
433 tcg_gen_discard_i64(cc_vr
);
435 s
->cc_op
= CC_OP_CONST0
+ val
;
438 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
440 if (live_cc_data(s
)) {
441 tcg_gen_discard_i64(cc_src
);
442 tcg_gen_discard_i64(cc_vr
);
444 tcg_gen_mov_i64(cc_dst
, dst
);
448 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
451 if (live_cc_data(s
)) {
452 tcg_gen_discard_i64(cc_vr
);
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
459 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
460 TCGv_i64 dst
, TCGv_i64 vr
)
462 tcg_gen_mov_i64(cc_src
, src
);
463 tcg_gen_mov_i64(cc_dst
, dst
);
464 tcg_gen_mov_i64(cc_vr
, vr
);
468 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
470 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
473 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
475 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
478 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
480 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
483 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
485 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
488 /* CC value is in env->cc_op */
489 static void set_cc_static(DisasContext
*s
)
491 if (live_cc_data(s
)) {
492 tcg_gen_discard_i64(cc_src
);
493 tcg_gen_discard_i64(cc_dst
);
494 tcg_gen_discard_i64(cc_vr
);
496 s
->cc_op
= CC_OP_STATIC
;
499 /* calculates cc into cc_op */
500 static void gen_op_calc_cc(DisasContext
*s
)
502 TCGv_i32 local_cc_op
;
505 TCGV_UNUSED_I32(local_cc_op
);
506 TCGV_UNUSED_I64(dummy
);
509 dummy
= tcg_const_i64(0);
523 local_cc_op
= tcg_const_i32(s
->cc_op
);
539 /* s->cc_op is the cc value */
540 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
543 /* env->cc_op already is the cc value */
558 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
563 case CC_OP_LTUGTU_32
:
564 case CC_OP_LTUGTU_64
:
571 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
586 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
589 /* unknown operation - assume 3 arguments and cc_op in env */
590 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
596 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
597 tcg_temp_free_i32(local_cc_op
);
599 if (!TCGV_IS_UNUSED_I64(dummy
)) {
600 tcg_temp_free_i64(dummy
);
603 /* We now have cc in cc_op as constant */
607 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
609 /* NOTE: we handle the case where the TB spans two pages here */
610 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
611 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
612 && !s
->singlestep_enabled
613 && !(s
->tb
->cflags
& CF_LAST_IO
)
614 && !(s
->tb
->flags
& FLAG_MASK_PER
));
617 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
619 #ifdef DEBUG_INLINE_BRANCHES
620 inline_branch_miss
[cc_op
]++;
624 static void account_inline_branch(DisasContext
*s
, int cc_op
)
626 #ifdef DEBUG_INLINE_BRANCHES
627 inline_branch_hit
[cc_op
]++;
631 /* Table of mask values to comparison codes, given a comparison as input.
632 For such, CC=3 should not be possible. */
633 static const TCGCond ltgt_cond
[16] = {
634 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
635 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
636 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
637 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
638 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
639 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
640 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
641 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
644 /* Table of mask values to comparison codes, given a logic op as input.
645 For such, only CC=0 and CC=1 should be possible. */
646 static const TCGCond nz_cond
[16] = {
647 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
648 TCG_COND_NEVER
, TCG_COND_NEVER
,
649 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
650 TCG_COND_NE
, TCG_COND_NE
,
651 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
652 TCG_COND_EQ
, TCG_COND_EQ
,
653 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
654 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
657 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
658 details required to generate a TCG comparison. */
659 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
662 enum cc_op old_cc_op
= s
->cc_op
;
664 if (mask
== 15 || mask
== 0) {
665 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
668 c
->g1
= c
->g2
= true;
673 /* Find the TCG condition for the mask + cc op. */
679 cond
= ltgt_cond
[mask
];
680 if (cond
== TCG_COND_NEVER
) {
683 account_inline_branch(s
, old_cc_op
);
686 case CC_OP_LTUGTU_32
:
687 case CC_OP_LTUGTU_64
:
688 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
689 if (cond
== TCG_COND_NEVER
) {
692 account_inline_branch(s
, old_cc_op
);
696 cond
= nz_cond
[mask
];
697 if (cond
== TCG_COND_NEVER
) {
700 account_inline_branch(s
, old_cc_op
);
715 account_inline_branch(s
, old_cc_op
);
730 account_inline_branch(s
, old_cc_op
);
734 switch (mask
& 0xa) {
735 case 8: /* src == 0 -> no one bit found */
738 case 2: /* src != 0 -> one bit found */
744 account_inline_branch(s
, old_cc_op
);
750 case 8 | 2: /* vr == 0 */
753 case 4 | 1: /* vr != 0 */
756 case 8 | 4: /* no carry -> vr >= src */
759 case 2 | 1: /* carry -> vr < src */
765 account_inline_branch(s
, old_cc_op
);
770 /* Note that CC=0 is impossible; treat it as don't-care. */
772 case 2: /* zero -> op1 == op2 */
775 case 4 | 1: /* !zero -> op1 != op2 */
778 case 4: /* borrow (!carry) -> op1 < op2 */
781 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
787 account_inline_branch(s
, old_cc_op
);
792 /* Calculate cc value. */
797 /* Jump based on CC. We'll load up the real cond below;
798 the assignment here merely avoids a compiler warning. */
799 account_noninline_branch(s
, old_cc_op
);
800 old_cc_op
= CC_OP_STATIC
;
801 cond
= TCG_COND_NEVER
;
805 /* Load up the arguments of the comparison. */
807 c
->g1
= c
->g2
= false;
811 c
->u
.s32
.a
= tcg_temp_new_i32();
812 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
813 c
->u
.s32
.b
= tcg_const_i32(0);
816 case CC_OP_LTUGTU_32
:
819 c
->u
.s32
.a
= tcg_temp_new_i32();
820 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
821 c
->u
.s32
.b
= tcg_temp_new_i32();
822 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
829 c
->u
.s64
.b
= tcg_const_i64(0);
833 case CC_OP_LTUGTU_64
:
837 c
->g1
= c
->g2
= true;
843 c
->u
.s64
.a
= tcg_temp_new_i64();
844 c
->u
.s64
.b
= tcg_const_i64(0);
845 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
850 c
->u
.s32
.a
= tcg_temp_new_i32();
851 c
->u
.s32
.b
= tcg_temp_new_i32();
852 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
853 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
854 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
856 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
863 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
864 c
->u
.s64
.b
= tcg_const_i64(0);
876 case 0x8 | 0x4 | 0x2: /* cc != 3 */
878 c
->u
.s32
.b
= tcg_const_i32(3);
880 case 0x8 | 0x4 | 0x1: /* cc != 2 */
882 c
->u
.s32
.b
= tcg_const_i32(2);
884 case 0x8 | 0x2 | 0x1: /* cc != 1 */
886 c
->u
.s32
.b
= tcg_const_i32(1);
888 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
891 c
->u
.s32
.a
= tcg_temp_new_i32();
892 c
->u
.s32
.b
= tcg_const_i32(0);
893 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
895 case 0x8 | 0x4: /* cc < 2 */
897 c
->u
.s32
.b
= tcg_const_i32(2);
899 case 0x8: /* cc == 0 */
901 c
->u
.s32
.b
= tcg_const_i32(0);
903 case 0x4 | 0x2 | 0x1: /* cc != 0 */
905 c
->u
.s32
.b
= tcg_const_i32(0);
907 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
910 c
->u
.s32
.a
= tcg_temp_new_i32();
911 c
->u
.s32
.b
= tcg_const_i32(0);
912 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
914 case 0x4: /* cc == 1 */
916 c
->u
.s32
.b
= tcg_const_i32(1);
918 case 0x2 | 0x1: /* cc > 1 */
920 c
->u
.s32
.b
= tcg_const_i32(1);
922 case 0x2: /* cc == 2 */
924 c
->u
.s32
.b
= tcg_const_i32(2);
926 case 0x1: /* cc == 3 */
928 c
->u
.s32
.b
= tcg_const_i32(3);
931 /* CC is masked by something else: (8 >> cc) & mask. */
934 c
->u
.s32
.a
= tcg_const_i32(8);
935 c
->u
.s32
.b
= tcg_const_i32(0);
936 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
937 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
948 static void free_compare(DisasCompare
*c
)
952 tcg_temp_free_i64(c
->u
.s64
.a
);
954 tcg_temp_free_i32(c
->u
.s32
.a
);
959 tcg_temp_free_i64(c
->u
.s64
.b
);
961 tcg_temp_free_i32(c
->u
.s32
.b
);
966 /* ====================================================================== */
967 /* Define the insn format enumeration. */
968 #define F0(N) FMT_##N,
969 #define F1(N, X1) F0(N)
970 #define F2(N, X1, X2) F0(N)
971 #define F3(N, X1, X2, X3) F0(N)
972 #define F4(N, X1, X2, X3, X4) F0(N)
973 #define F5(N, X1, X2, X3, X4, X5) F0(N)
976 #include "insn-format.def"
986 /* Define a structure to hold the decoded fields. We'll store each inside
987 an array indexed by an enum. In order to conserve memory, we'll arrange
988 for fields that do not exist at the same time to overlap, thus the "C"
989 for compact. For checking purposes there is an "O" for original index
990 as well that will be applied to availability bitmaps. */
992 enum DisasFieldIndexO
{
1015 enum DisasFieldIndexC
{
1046 struct DisasFields
{
1050 unsigned presentC
:16;
1051 unsigned int presentO
;
1055 /* This is the way fields are to be accessed out of DisasFields. */
1056 #define have_field(S, F) have_field1((S), FLD_O_##F)
1057 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1059 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1061 return (f
->presentO
>> c
) & 1;
1064 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1065 enum DisasFieldIndexC c
)
1067 assert(have_field1(f
, o
));
1071 /* Describe the layout of each field in each format. */
1072 typedef struct DisasField
{
1074 unsigned int size
:8;
1075 unsigned int type
:2;
1076 unsigned int indexC
:6;
1077 enum DisasFieldIndexO indexO
:8;
1080 typedef struct DisasFormatInfo
{
1081 DisasField op
[NUM_C_FIELD
];
1084 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1085 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1086 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1087 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1088 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1089 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1090 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1091 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1092 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1093 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1094 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1095 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1096 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1097 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1099 #define F0(N) { { } },
1100 #define F1(N, X1) { { X1 } },
1101 #define F2(N, X1, X2) { { X1, X2 } },
1102 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1103 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1104 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1106 static const DisasFormatInfo format_info
[] = {
1107 #include "insn-format.def"
1125 /* Generally, we'll extract operands into this structures, operate upon
1126 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1127 of routines below for more details. */
1129 bool g_out
, g_out2
, g_in1
, g_in2
;
1130 TCGv_i64 out
, out2
, in1
, in2
;
1134 /* Instructions can place constraints on their operands, raising specification
1135 exceptions if they are violated. To make this easy to automate, each "in1",
1136 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1137 of the following, or 0. To make this easy to document, we'll put the
1138 SPEC_<name> defines next to <name>. */
1140 #define SPEC_r1_even 1
1141 #define SPEC_r2_even 2
1142 #define SPEC_r3_even 4
1143 #define SPEC_r1_f128 8
1144 #define SPEC_r2_f128 16
1146 /* Return values from translate_one, indicating the state of the TB. */
1148 /* Continue the TB. */
1150 /* We have emitted one or more goto_tb. No fixup required. */
1152 /* We are not using a goto_tb (for whatever reason), but have updated
1153 the PC (for whatever reason), so there's no need to do it again on
1156 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1157 updated the PC for the next instruction to be executed. */
1159 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1160 No following code will be executed. */
1164 typedef enum DisasFacility
{
1165 FAC_Z
, /* zarch (default) */
1166 FAC_CASS
, /* compare and swap and store */
1167 FAC_CASS2
, /* compare and swap and store 2 */
1168 FAC_DFP
, /* decimal floating point */
1169 FAC_DFPR
, /* decimal floating point rounding */
1170 FAC_DO
, /* distinct operands */
1171 FAC_EE
, /* execute extensions */
1172 FAC_EI
, /* extended immediate */
1173 FAC_FPE
, /* floating point extension */
1174 FAC_FPSSH
, /* floating point support sign handling */
1175 FAC_FPRGR
, /* FPR-GR transfer */
1176 FAC_GIE
, /* general instructions extension */
1177 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1178 FAC_HW
, /* high-word */
1179 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1180 FAC_MIE
, /* miscellaneous-instruction-extensions */
1181 FAC_LAT
, /* load-and-trap */
1182 FAC_LOC
, /* load/store on condition */
1183 FAC_LD
, /* long displacement */
1184 FAC_PC
, /* population count */
1185 FAC_SCF
, /* store clock fast */
1186 FAC_SFLE
, /* store facility list extended */
1187 FAC_ILA
, /* interlocked access facility 1 */
1193 DisasFacility fac
:8;
1198 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1199 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1200 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1201 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1202 void (*help_cout
)(DisasContext
*, DisasOps
*);
1203 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1208 /* ====================================================================== */
1209 /* Miscellaneous helpers, used by several operations. */
1211 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1212 DisasOps
*o
, int mask
)
1214 int b2
= get_field(f
, b2
);
1215 int d2
= get_field(f
, d2
);
1218 o
->in2
= tcg_const_i64(d2
& mask
);
1220 o
->in2
= get_address(s
, 0, b2
, d2
);
1221 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1225 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1227 if (dest
== s
->next_pc
) {
1228 per_branch(s
, true);
1231 if (use_goto_tb(s
, dest
)) {
1233 per_breaking_event(s
);
1235 tcg_gen_movi_i64(psw_addr
, dest
);
1236 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1237 return EXIT_GOTO_TB
;
1239 tcg_gen_movi_i64(psw_addr
, dest
);
1240 per_branch(s
, false);
1241 return EXIT_PC_UPDATED
;
1245 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1246 bool is_imm
, int imm
, TCGv_i64 cdest
)
1249 uint64_t dest
= s
->pc
+ 2 * imm
;
1252 /* Take care of the special cases first. */
1253 if (c
->cond
== TCG_COND_NEVER
) {
1258 if (dest
== s
->next_pc
) {
1259 /* Branch to next. */
1260 per_branch(s
, true);
1264 if (c
->cond
== TCG_COND_ALWAYS
) {
1265 ret
= help_goto_direct(s
, dest
);
1269 if (TCGV_IS_UNUSED_I64(cdest
)) {
1270 /* E.g. bcr %r0 -> no branch. */
1274 if (c
->cond
== TCG_COND_ALWAYS
) {
1275 tcg_gen_mov_i64(psw_addr
, cdest
);
1276 per_branch(s
, false);
1277 ret
= EXIT_PC_UPDATED
;
1282 if (use_goto_tb(s
, s
->next_pc
)) {
1283 if (is_imm
&& use_goto_tb(s
, dest
)) {
1284 /* Both exits can use goto_tb. */
1287 lab
= gen_new_label();
1289 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1291 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1294 /* Branch not taken. */
1296 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1297 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1301 per_breaking_event(s
);
1303 tcg_gen_movi_i64(psw_addr
, dest
);
1304 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1308 /* Fallthru can use goto_tb, but taken branch cannot. */
1309 /* Store taken branch destination before the brcond. This
1310 avoids having to allocate a new local temp to hold it.
1311 We'll overwrite this in the not taken case anyway. */
1313 tcg_gen_mov_i64(psw_addr
, cdest
);
1316 lab
= gen_new_label();
1318 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1320 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1323 /* Branch not taken. */
1326 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1327 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1331 tcg_gen_movi_i64(psw_addr
, dest
);
1333 per_breaking_event(s
);
1334 ret
= EXIT_PC_UPDATED
;
1337 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1338 Most commonly we're single-stepping or some other condition that
1339 disables all use of goto_tb. Just update the PC and exit. */
1341 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1343 cdest
= tcg_const_i64(dest
);
1347 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1349 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1351 TCGv_i32 t0
= tcg_temp_new_i32();
1352 TCGv_i64 t1
= tcg_temp_new_i64();
1353 TCGv_i64 z
= tcg_const_i64(0);
1354 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1355 tcg_gen_extu_i32_i64(t1
, t0
);
1356 tcg_temp_free_i32(t0
);
1357 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1358 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1359 tcg_temp_free_i64(t1
);
1360 tcg_temp_free_i64(z
);
1364 tcg_temp_free_i64(cdest
);
1366 tcg_temp_free_i64(next
);
1368 ret
= EXIT_PC_UPDATED
;
1376 /* ====================================================================== */
1377 /* The operations. These perform the bulk of the work for any insn,
1378 usually after the operands have been loaded and output initialized. */
1380 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1383 z
= tcg_const_i64(0);
1384 n
= tcg_temp_new_i64();
1385 tcg_gen_neg_i64(n
, o
->in2
);
1386 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1387 tcg_temp_free_i64(n
);
1388 tcg_temp_free_i64(z
);
1392 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1394 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1398 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1400 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1404 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1406 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1407 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1411 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1413 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1417 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1422 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1424 /* The carry flag is the msb of CC, therefore the branch mask that would
1425 create that comparison is 3. Feeding the generated comparison to
1426 setcond produces the carry flag that we desire. */
1427 disas_jcc(s
, &cmp
, 3);
1428 carry
= tcg_temp_new_i64();
1430 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1432 TCGv_i32 t
= tcg_temp_new_i32();
1433 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1434 tcg_gen_extu_i32_i64(carry
, t
);
1435 tcg_temp_free_i32(t
);
1439 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1440 tcg_temp_free_i64(carry
);
1444 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1446 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1450 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1452 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1456 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1458 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1459 return_low128(o
->out2
);
1463 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1465 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1469 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1471 int shift
= s
->insn
->data
& 0xff;
1472 int size
= s
->insn
->data
>> 8;
1473 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1476 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1477 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1478 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1480 /* Produce the CC from only the bits manipulated. */
1481 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1482 set_cc_nz_u64(s
, cc_dst
);
1486 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1488 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1489 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1490 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1491 per_branch(s
, false);
1492 return EXIT_PC_UPDATED
;
1498 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1500 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1501 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1504 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1506 int m1
= get_field(s
->fields
, m1
);
1507 bool is_imm
= have_field(s
->fields
, i2
);
1508 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1511 disas_jcc(s
, &c
, m1
);
1512 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1515 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1517 int r1
= get_field(s
->fields
, r1
);
1518 bool is_imm
= have_field(s
->fields
, i2
);
1519 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1523 c
.cond
= TCG_COND_NE
;
1528 t
= tcg_temp_new_i64();
1529 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1530 store_reg32_i64(r1
, t
);
1531 c
.u
.s32
.a
= tcg_temp_new_i32();
1532 c
.u
.s32
.b
= tcg_const_i32(0);
1533 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1534 tcg_temp_free_i64(t
);
1536 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1539 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1541 int r1
= get_field(s
->fields
, r1
);
1542 int imm
= get_field(s
->fields
, i2
);
1546 c
.cond
= TCG_COND_NE
;
1551 t
= tcg_temp_new_i64();
1552 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1553 tcg_gen_subi_i64(t
, t
, 1);
1554 store_reg32h_i64(r1
, t
);
1555 c
.u
.s32
.a
= tcg_temp_new_i32();
1556 c
.u
.s32
.b
= tcg_const_i32(0);
1557 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1558 tcg_temp_free_i64(t
);
1560 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1563 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1565 int r1
= get_field(s
->fields
, r1
);
1566 bool is_imm
= have_field(s
->fields
, i2
);
1567 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1570 c
.cond
= TCG_COND_NE
;
1575 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1576 c
.u
.s64
.a
= regs
[r1
];
1577 c
.u
.s64
.b
= tcg_const_i64(0);
1579 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1582 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1584 int r1
= get_field(s
->fields
, r1
);
1585 int r3
= get_field(s
->fields
, r3
);
1586 bool is_imm
= have_field(s
->fields
, i2
);
1587 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1591 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1596 t
= tcg_temp_new_i64();
1597 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1598 c
.u
.s32
.a
= tcg_temp_new_i32();
1599 c
.u
.s32
.b
= tcg_temp_new_i32();
1600 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1601 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1602 store_reg32_i64(r1
, t
);
1603 tcg_temp_free_i64(t
);
1605 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1608 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1610 int r1
= get_field(s
->fields
, r1
);
1611 int r3
= get_field(s
->fields
, r3
);
1612 bool is_imm
= have_field(s
->fields
, i2
);
1613 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1616 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1619 if (r1
== (r3
| 1)) {
1620 c
.u
.s64
.b
= load_reg(r3
| 1);
1623 c
.u
.s64
.b
= regs
[r3
| 1];
1627 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1628 c
.u
.s64
.a
= regs
[r1
];
1631 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1634 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1636 int imm
, m3
= get_field(s
->fields
, m3
);
1640 c
.cond
= ltgt_cond
[m3
];
1641 if (s
->insn
->data
) {
1642 c
.cond
= tcg_unsigned_cond(c
.cond
);
1644 c
.is_64
= c
.g1
= c
.g2
= true;
1648 is_imm
= have_field(s
->fields
, i4
);
1650 imm
= get_field(s
->fields
, i4
);
1653 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1654 get_field(s
->fields
, d4
));
1657 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1660 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1662 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1667 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1669 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1674 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1676 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1681 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1683 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1684 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1685 tcg_temp_free_i32(m3
);
1686 gen_set_cc_nz_f32(s
, o
->in2
);
1690 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1692 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1693 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1694 tcg_temp_free_i32(m3
);
1695 gen_set_cc_nz_f64(s
, o
->in2
);
1699 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1701 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1702 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1703 tcg_temp_free_i32(m3
);
1704 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1708 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1710 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1711 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1712 tcg_temp_free_i32(m3
);
1713 gen_set_cc_nz_f32(s
, o
->in2
);
1717 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1719 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1720 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1721 tcg_temp_free_i32(m3
);
1722 gen_set_cc_nz_f64(s
, o
->in2
);
1726 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1728 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1729 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1730 tcg_temp_free_i32(m3
);
1731 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1735 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1737 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1738 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1739 tcg_temp_free_i32(m3
);
1740 gen_set_cc_nz_f32(s
, o
->in2
);
1744 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1746 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1747 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1748 tcg_temp_free_i32(m3
);
1749 gen_set_cc_nz_f64(s
, o
->in2
);
1753 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1755 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1756 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1757 tcg_temp_free_i32(m3
);
1758 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1762 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1764 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1765 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1766 tcg_temp_free_i32(m3
);
1767 gen_set_cc_nz_f32(s
, o
->in2
);
1771 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1773 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1774 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1775 tcg_temp_free_i32(m3
);
1776 gen_set_cc_nz_f64(s
, o
->in2
);
1780 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1782 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1783 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1784 tcg_temp_free_i32(m3
);
1785 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1789 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1791 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1792 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1793 tcg_temp_free_i32(m3
);
1797 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1799 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1800 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1801 tcg_temp_free_i32(m3
);
1805 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1807 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1808 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1809 tcg_temp_free_i32(m3
);
1810 return_low128(o
->out2
);
1814 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1816 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1817 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1818 tcg_temp_free_i32(m3
);
1822 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1824 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1825 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1826 tcg_temp_free_i32(m3
);
1830 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1832 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1833 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1834 tcg_temp_free_i32(m3
);
1835 return_low128(o
->out2
);
1839 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1841 int r2
= get_field(s
->fields
, r2
);
1842 TCGv_i64 len
= tcg_temp_new_i64();
1844 potential_page_fault(s
);
1845 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1847 return_low128(o
->out
);
1849 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1850 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1851 tcg_temp_free_i64(len
);
1856 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1858 int l
= get_field(s
->fields
, l1
);
1863 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1864 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1867 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1868 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1871 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1872 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1875 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1876 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1879 potential_page_fault(s
);
1880 vl
= tcg_const_i32(l
);
1881 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1882 tcg_temp_free_i32(vl
);
1886 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1890 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1892 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1893 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1894 potential_page_fault(s
);
1895 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1896 tcg_temp_free_i32(r1
);
1897 tcg_temp_free_i32(r3
);
1902 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1904 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1905 TCGv_i32 t1
= tcg_temp_new_i32();
1906 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1907 potential_page_fault(s
);
1908 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1910 tcg_temp_free_i32(t1
);
1911 tcg_temp_free_i32(m3
);
1915 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1917 potential_page_fault(s
);
1918 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1920 return_low128(o
->in2
);
1924 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1926 TCGv_i64 t
= tcg_temp_new_i64();
1927 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1928 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1929 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1930 tcg_temp_free_i64(t
);
1934 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1936 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1937 int d2
= get_field(s
->fields
, d2
);
1938 int b2
= get_field(s
->fields
, b2
);
1939 int is_64
= s
->insn
->data
;
1940 TCGv_i64 addr
, mem
, cc
, z
;
1942 /* Note that in1 = R3 (new value) and
1943 in2 = (zero-extended) R1 (expected value). */
1945 /* Load the memory into the (temporary) output. While the PoO only talks
1946 about moving the memory to R1 on inequality, if we include equality it
1947 means that R1 is equal to the memory in all conditions. */
1948 addr
= get_address(s
, 0, b2
, d2
);
1950 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1952 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1955 /* Are the memory and expected values (un)equal? Note that this setcond
1956 produces the output CC value, thus the NE sense of the test. */
1957 cc
= tcg_temp_new_i64();
1958 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1960 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1961 Recall that we are allowed to unconditionally issue the store (and
1962 thus any possible write trap), so (re-)store the original contents
1963 of MEM in case of inequality. */
1964 z
= tcg_const_i64(0);
1965 mem
= tcg_temp_new_i64();
1966 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1968 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1970 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1972 tcg_temp_free_i64(z
);
1973 tcg_temp_free_i64(mem
);
1974 tcg_temp_free_i64(addr
);
1976 /* Store CC back to cc_op. Wait until after the store so that any
1977 exception gets the old cc_op value. */
1978 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1979 tcg_temp_free_i64(cc
);
1984 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1986 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1987 int r1
= get_field(s
->fields
, r1
);
1988 int r3
= get_field(s
->fields
, r3
);
1989 int d2
= get_field(s
->fields
, d2
);
1990 int b2
= get_field(s
->fields
, b2
);
1991 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
1993 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1995 addrh
= get_address(s
, 0, b2
, d2
);
1996 addrl
= get_address(s
, 0, b2
, d2
+ 8);
1997 outh
= tcg_temp_new_i64();
1998 outl
= tcg_temp_new_i64();
2000 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
2001 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
2003 /* Fold the double-word compare with arithmetic. */
2004 cc
= tcg_temp_new_i64();
2005 z
= tcg_temp_new_i64();
2006 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
2007 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
2008 tcg_gen_or_i64(cc
, cc
, z
);
2009 tcg_gen_movi_i64(z
, 0);
2010 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
2012 memh
= tcg_temp_new_i64();
2013 meml
= tcg_temp_new_i64();
2014 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
2015 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
2016 tcg_temp_free_i64(z
);
2018 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
2019 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
2020 tcg_temp_free_i64(memh
);
2021 tcg_temp_free_i64(meml
);
2022 tcg_temp_free_i64(addrh
);
2023 tcg_temp_free_i64(addrl
);
2025 /* Save back state now that we've passed all exceptions. */
2026 tcg_gen_mov_i64(regs
[r1
], outh
);
2027 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
2028 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2029 tcg_temp_free_i64(outh
);
2030 tcg_temp_free_i64(outl
);
2031 tcg_temp_free_i64(cc
);
2036 #ifndef CONFIG_USER_ONLY
2037 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2039 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2040 check_privileged(s
);
2041 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
2042 tcg_temp_free_i32(r1
);
2048 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2050 TCGv_i64 t1
= tcg_temp_new_i64();
2051 TCGv_i32 t2
= tcg_temp_new_i32();
2052 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2053 gen_helper_cvd(t1
, t2
);
2054 tcg_temp_free_i32(t2
);
2055 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2056 tcg_temp_free_i64(t1
);
2060 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2062 int m3
= get_field(s
->fields
, m3
);
2063 TCGLabel
*lab
= gen_new_label();
2066 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2067 if (s
->insn
->data
) {
2068 c
= tcg_unsigned_cond(c
);
2070 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2079 #ifndef CONFIG_USER_ONLY
2080 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2082 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2083 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2084 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2086 check_privileged(s
);
2090 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2092 tcg_temp_free_i32(func_code
);
2093 tcg_temp_free_i32(r3
);
2094 tcg_temp_free_i32(r1
);
2099 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2101 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2102 return_low128(o
->out
);
2106 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2108 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2109 return_low128(o
->out
);
2113 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2115 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2116 return_low128(o
->out
);
2120 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2122 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2123 return_low128(o
->out
);
2127 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2129 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2133 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2135 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2139 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2141 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2142 return_low128(o
->out2
);
2146 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2148 int r2
= get_field(s
->fields
, r2
);
2149 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2153 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2155 /* No cache information provided. */
2156 tcg_gen_movi_i64(o
->out
, -1);
2160 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2162 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2166 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2168 int r1
= get_field(s
->fields
, r1
);
2169 int r2
= get_field(s
->fields
, r2
);
2170 TCGv_i64 t
= tcg_temp_new_i64();
2172 /* Note the "subsequently" in the PoO, which implies a defined result
2173 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2174 tcg_gen_shri_i64(t
, psw_mask
, 32);
2175 store_reg32_i64(r1
, t
);
2177 store_reg32_i64(r2
, psw_mask
);
2180 tcg_temp_free_i64(t
);
2184 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2186 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2187 tb->flags, (ab)use the tb->cs_base field as the address of
2188 the template in memory, and grab 8 bits of tb->flags/cflags for
2189 the contents of the register. We would then recognize all this
2190 in gen_intermediate_code_internal, generating code for exactly
2191 one instruction. This new TB then gets executed normally.
2193 On the other hand, this seems to be mostly used for modifying
2194 MVC inside of memcpy, which needs a helper call anyway. So
2195 perhaps this doesn't bear thinking about any further. */
2202 tmp
= tcg_const_i64(s
->next_pc
);
2203 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2204 tcg_temp_free_i64(tmp
);
2209 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2211 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2212 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2213 tcg_temp_free_i32(m3
);
2217 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2219 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2220 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2221 tcg_temp_free_i32(m3
);
2225 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2227 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2228 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2229 return_low128(o
->out2
);
2230 tcg_temp_free_i32(m3
);
2234 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2236 /* We'll use the original input for cc computation, since we get to
2237 compare that against 0, which ought to be better than comparing
2238 the real output against 64. It also lets cc_dst be a convenient
2239 temporary during our computation. */
2240 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2242 /* R1 = IN ? CLZ(IN) : 64. */
2243 gen_helper_clz(o
->out
, o
->in2
);
2245 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2246 value by 64, which is undefined. But since the shift is 64 iff the
2247 input is zero, we still get the correct result after and'ing. */
2248 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2249 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2250 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2254 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2256 int m3
= get_field(s
->fields
, m3
);
2257 int pos
, len
, base
= s
->insn
->data
;
2258 TCGv_i64 tmp
= tcg_temp_new_i64();
2263 /* Effectively a 32-bit load. */
2264 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2271 /* Effectively a 16-bit load. */
2272 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2280 /* Effectively an 8-bit load. */
2281 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2286 pos
= base
+ ctz32(m3
) * 8;
2287 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2288 ccm
= ((1ull << len
) - 1) << pos
;
2292 /* This is going to be a sequence of loads and inserts. */
2293 pos
= base
+ 32 - 8;
2297 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2298 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2299 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2302 m3
= (m3
<< 1) & 0xf;
2308 tcg_gen_movi_i64(tmp
, ccm
);
2309 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2310 tcg_temp_free_i64(tmp
);
2314 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2316 int shift
= s
->insn
->data
& 0xff;
2317 int size
= s
->insn
->data
>> 8;
2318 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2322 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2327 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2329 t1
= tcg_temp_new_i64();
2330 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2331 tcg_gen_shri_i64(t1
, t1
, 36);
2332 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2334 tcg_gen_extu_i32_i64(t1
, cc_op
);
2335 tcg_gen_shli_i64(t1
, t1
, 28);
2336 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2337 tcg_temp_free_i64(t1
);
2341 #ifndef CONFIG_USER_ONLY
2342 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2344 check_privileged(s
);
2345 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2349 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2351 check_privileged(s
);
2352 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2357 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2359 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2363 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2365 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2369 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2371 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2375 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2377 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2381 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2383 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2384 return_low128(o
->out2
);
2388 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2390 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2391 return_low128(o
->out2
);
2395 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2397 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2401 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2403 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2407 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2409 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2413 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2415 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2419 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2421 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2425 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2427 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2431 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2433 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2437 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2439 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2443 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2445 TCGLabel
*lab
= gen_new_label();
2446 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2447 /* The value is stored even in case of trap. */
2448 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2454 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2456 TCGLabel
*lab
= gen_new_label();
2457 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2458 /* The value is stored even in case of trap. */
2459 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2465 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2467 TCGLabel
*lab
= gen_new_label();
2468 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2469 /* The value is stored even in case of trap. */
2470 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2476 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2478 TCGLabel
*lab
= gen_new_label();
2479 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2480 /* The value is stored even in case of trap. */
2481 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2487 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2489 TCGLabel
*lab
= gen_new_label();
2490 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2491 /* The value is stored even in case of trap. */
2492 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2498 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2502 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2505 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2509 TCGv_i32 t32
= tcg_temp_new_i32();
2512 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2515 t
= tcg_temp_new_i64();
2516 tcg_gen_extu_i32_i64(t
, t32
);
2517 tcg_temp_free_i32(t32
);
2519 z
= tcg_const_i64(0);
2520 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2521 tcg_temp_free_i64(t
);
2522 tcg_temp_free_i64(z
);
2528 #ifndef CONFIG_USER_ONLY
2529 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2531 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2532 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2533 check_privileged(s
);
2534 potential_page_fault(s
);
2535 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2536 tcg_temp_free_i32(r1
);
2537 tcg_temp_free_i32(r3
);
2541 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2543 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2544 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2545 check_privileged(s
);
2546 potential_page_fault(s
);
2547 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2548 tcg_temp_free_i32(r1
);
2549 tcg_temp_free_i32(r3
);
2552 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2554 check_privileged(s
);
2555 potential_page_fault(s
);
2556 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2561 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2565 check_privileged(s
);
2566 per_breaking_event(s
);
2568 t1
= tcg_temp_new_i64();
2569 t2
= tcg_temp_new_i64();
2570 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2571 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2572 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2573 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2574 tcg_gen_shli_i64(t1
, t1
, 32);
2575 gen_helper_load_psw(cpu_env
, t1
, t2
);
2576 tcg_temp_free_i64(t1
);
2577 tcg_temp_free_i64(t2
);
2578 return EXIT_NORETURN
;
2581 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2585 check_privileged(s
);
2586 per_breaking_event(s
);
2588 t1
= tcg_temp_new_i64();
2589 t2
= tcg_temp_new_i64();
2590 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2591 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2592 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2593 gen_helper_load_psw(cpu_env
, t1
, t2
);
2594 tcg_temp_free_i64(t1
);
2595 tcg_temp_free_i64(t2
);
2596 return EXIT_NORETURN
;
2600 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2602 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2603 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2604 potential_page_fault(s
);
2605 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2606 tcg_temp_free_i32(r1
);
2607 tcg_temp_free_i32(r3
);
2611 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2613 int r1
= get_field(s
->fields
, r1
);
2614 int r3
= get_field(s
->fields
, r3
);
2617 /* Only one register to read. */
2618 t1
= tcg_temp_new_i64();
2619 if (unlikely(r1
== r3
)) {
2620 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2621 store_reg32_i64(r1
, t1
);
2626 /* First load the values of the first and last registers to trigger
2627 possible page faults. */
2628 t2
= tcg_temp_new_i64();
2629 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2630 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2631 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2632 store_reg32_i64(r1
, t1
);
2633 store_reg32_i64(r3
, t2
);
2635 /* Only two registers to read. */
2636 if (((r1
+ 1) & 15) == r3
) {
2642 /* Then load the remaining registers. Page fault can't occur. */
2644 tcg_gen_movi_i64(t2
, 4);
2647 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2648 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2649 store_reg32_i64(r1
, t1
);
2657 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2659 int r1
= get_field(s
->fields
, r1
);
2660 int r3
= get_field(s
->fields
, r3
);
2663 /* Only one register to read. */
2664 t1
= tcg_temp_new_i64();
2665 if (unlikely(r1
== r3
)) {
2666 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2667 store_reg32h_i64(r1
, t1
);
2672 /* First load the values of the first and last registers to trigger
2673 possible page faults. */
2674 t2
= tcg_temp_new_i64();
2675 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2676 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2677 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2678 store_reg32h_i64(r1
, t1
);
2679 store_reg32h_i64(r3
, t2
);
2681 /* Only two registers to read. */
2682 if (((r1
+ 1) & 15) == r3
) {
2688 /* Then load the remaining registers. Page fault can't occur. */
2690 tcg_gen_movi_i64(t2
, 4);
2693 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2694 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2695 store_reg32h_i64(r1
, t1
);
2703 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2705 int r1
= get_field(s
->fields
, r1
);
2706 int r3
= get_field(s
->fields
, r3
);
2709 /* Only one register to read. */
2710 if (unlikely(r1
== r3
)) {
2711 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2715 /* First load the values of the first and last registers to trigger
2716 possible page faults. */
2717 t1
= tcg_temp_new_i64();
2718 t2
= tcg_temp_new_i64();
2719 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2720 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2721 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2722 tcg_gen_mov_i64(regs
[r1
], t1
);
2725 /* Only two registers to read. */
2726 if (((r1
+ 1) & 15) == r3
) {
2731 /* Then load the remaining registers. Page fault can't occur. */
2733 tcg_gen_movi_i64(t1
, 8);
2736 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2737 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2744 #ifndef CONFIG_USER_ONLY
2745 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2747 check_privileged(s
);
2748 potential_page_fault(s
);
2749 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2753 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2755 check_privileged(s
);
2756 potential_page_fault(s
);
2757 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2762 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2765 o
->g_out
= o
->g_in2
;
2766 TCGV_UNUSED_I64(o
->in2
);
2771 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2773 int b2
= get_field(s
->fields
, b2
);
2774 TCGv ar1
= tcg_temp_new_i64();
2777 o
->g_out
= o
->g_in2
;
2778 TCGV_UNUSED_I64(o
->in2
);
2781 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2782 case PSW_ASC_PRIMARY
>> 32:
2783 tcg_gen_movi_i64(ar1
, 0);
2785 case PSW_ASC_ACCREG
>> 32:
2786 tcg_gen_movi_i64(ar1
, 1);
2788 case PSW_ASC_SECONDARY
>> 32:
2790 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2792 tcg_gen_movi_i64(ar1
, 0);
2795 case PSW_ASC_HOME
>> 32:
2796 tcg_gen_movi_i64(ar1
, 2);
2800 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2801 tcg_temp_free_i64(ar1
);
2806 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2810 o
->g_out
= o
->g_in1
;
2811 o
->g_out2
= o
->g_in2
;
2812 TCGV_UNUSED_I64(o
->in1
);
2813 TCGV_UNUSED_I64(o
->in2
);
2814 o
->g_in1
= o
->g_in2
= false;
2818 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2820 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2821 potential_page_fault(s
);
2822 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2823 tcg_temp_free_i32(l
);
2827 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2829 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2830 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2831 potential_page_fault(s
);
2832 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2833 tcg_temp_free_i32(r1
);
2834 tcg_temp_free_i32(r2
);
/* MVCLE (MOVE LONG EXTENDED): like MVCL but the second operand address
 * comes in via o->in2 and the third register number r3 names the source
 * pair; the helper sets the condition code through cc_op. */
2839 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2841 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2842 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
/* The helper accesses guest memory and may fault. */
2843 potential_page_fault(s
);
2844 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2845 tcg_temp_free_i32(r1
);
2846 tcg_temp_free_i32(r3
);
2851 #ifndef CONFIG_USER_ONLY
2852 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2854 int r1
= get_field(s
->fields
, l1
);
2855 check_privileged(s
);
2856 potential_page_fault(s
);
2857 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2862 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2864 int r1
= get_field(s
->fields
, l1
);
2865 check_privileged(s
);
2866 potential_page_fault(s
);
2867 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2873 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2875 potential_page_fault(s
);
2876 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2881 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2883 potential_page_fault(s
);
2884 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2886 return_low128(o
->in2
);
/* MULTIPLY (64-bit, low half only): out = in1 * in2. The condition code
 * is not touched here; the insn table's cc-out hook (if any) handles it. */
2890 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2892 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
/* MULTIPLY producing a full 128-bit unsigned product: the high half goes
 * to out and the low half to out2 (note the argument order of
 * tcg_gen_mulu2_i64 is low, high). */
2896 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2898 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2902 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2904 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2908 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2910 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2914 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2916 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2920 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2922 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2923 return_low128(o
->out2
);
2927 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2929 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2930 return_low128(o
->out2
);
2934 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2936 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2937 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2938 tcg_temp_free_i64(r3
);
2942 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2944 int r3
= get_field(s
->fields
, r3
);
2945 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2949 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2951 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2952 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2953 tcg_temp_free_i64(r3
);
2957 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2959 int r3
= get_field(s
->fields
, r3
);
2960 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
/* LOAD NEGATIVE (nabs): out = -(|in2|), computed branch-free with a
 * movcond: if in2 >= 0 pick the negated value, else in2 is already
 * negative and is kept as-is.
 * NOTE(review): numbering gap 2964 -> 2967 shows the extraction dropped
 * lines (presumably "{" and the "TCGv_i64 z, n;" declarations). */
2964 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2967 z
= tcg_const_i64(0);
2968 n
= tcg_temp_new_i64();
/* n = -in2 */
2969 tcg_gen_neg_i64(n
, o
->in2
);
/* out = (in2 >= 0) ? n : in2  — i.e. always the non-positive value. */
2970 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
2971 tcg_temp_free_i64(n
);
2972 tcg_temp_free_i64(z
);
2976 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2978 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2982 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2984 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2988 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2990 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2991 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2995 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2997 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2998 potential_page_fault(s
);
2999 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3000 tcg_temp_free_i32(l
);
3005 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3007 tcg_gen_neg_i64(o
->out
, o
->in2
);
3011 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3013 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3017 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3019 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3023 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3025 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3026 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3030 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3032 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3033 potential_page_fault(s
);
3034 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3035 tcg_temp_free_i32(l
);
3040 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3042 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
/* OR immediate into a sub-field of a register (OILL/OILH/... family).
 * insn->data packs the field position: low byte = bit shift of the field,
 * high byte = field width in bits. The immediate arrives pre-loaded in
 * in2; it is shifted into place and OR-ed with in1. */
3046 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3048 int shift
= s
->insn
->data
& 0xff;
3049 int size
= s
->insn
->data
>> 8;
/* Mask covering exactly the bits of the affected field. */
3050 uint64_t mask
= ((1ull << size
) - 1) << shift
;
/* Position the immediate and merge it into the register value. */
3053 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3054 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3056 /* Produce the CC from only the bits manipulated. */
3057 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3058 set_cc_nz_u64(s
, cc_dst
);
3062 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3064 gen_helper_popcnt(o
->out
, o
->in2
);
3068 #ifndef CONFIG_USER_ONLY
3069 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3071 check_privileged(s
);
3072 gen_helper_ptlb(cpu_env
);
3077 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3079 int i3
= get_field(s
->fields
, i3
);
3080 int i4
= get_field(s
->fields
, i4
);
3081 int i5
= get_field(s
->fields
, i5
);
3082 int do_zero
= i4
& 0x80;
3083 uint64_t mask
, imask
, pmask
;
3086 /* Adjust the arguments for the specific insn. */
3087 switch (s
->fields
->op2
) {
3088 case 0x55: /* risbg */
3093 case 0x5d: /* risbhg */
3096 pmask
= 0xffffffff00000000ull
;
3098 case 0x51: /* risblg */
3101 pmask
= 0x00000000ffffffffull
;
3107 /* MASK is the set of bits to be inserted from R2.
3108 Take care for I3/I4 wraparound. */
3111 mask
^= pmask
>> i4
>> 1;
3113 mask
|= ~(pmask
>> i4
>> 1);
3117 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3118 insns, we need to keep the other half of the register. */
3119 imask
= ~mask
| ~pmask
;
3121 if (s
->fields
->op2
== 0x55) {
3128 /* In some cases we can implement this with deposit, which can be more
3129 efficient on some hosts. */
3130 if (~mask
== imask
&& i3
<= i4
) {
3131 if (s
->fields
->op2
== 0x5d) {
3134 /* Note that we rotate the bits to be inserted to the lsb, not to
3135 the position as described in the PoO. */
3138 rot
= (i5
- pos
) & 63;
3144 /* Rotate the input as necessary. */
3145 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3147 /* Insert the selected bits into the output. */
3149 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3150 } else if (imask
== 0) {
3151 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3153 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3154 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3155 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3160 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3162 int i3
= get_field(s
->fields
, i3
);
3163 int i4
= get_field(s
->fields
, i4
);
3164 int i5
= get_field(s
->fields
, i5
);
3167 /* If this is a test-only form, arrange to discard the result. */
3169 o
->out
= tcg_temp_new_i64();
3177 /* MASK is the set of bits to be operated on from R2.
3178 Take care for I3/I4 wraparound. */
3181 mask
^= ~0ull >> i4
>> 1;
3183 mask
|= ~(~0ull >> i4
>> 1);
3186 /* Rotate the input as necessary. */
3187 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3190 switch (s
->fields
->op2
) {
3191 case 0x55: /* AND */
3192 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3193 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3196 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3197 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3199 case 0x57: /* XOR */
3200 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3201 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3208 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3209 set_cc_nz_u64(s
, cc_dst
);
3213 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3215 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3219 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3221 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3225 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3227 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
/* RLL (ROTATE LEFT single LOGICAL, 32-bit): narrow both 64-bit inputs to
 * 32 bits, rotate, and zero-extend the result back to 64 bits. Done via
 * i32 temporaries because TCG's rotate width must match the operand. */
3231 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3233 TCGv_i32 t1
= tcg_temp_new_i32();
3234 TCGv_i32 t2
= tcg_temp_new_i32();
3235 TCGv_i32 to
= tcg_temp_new_i32();
/* Take the low 32 bits of value and rotate count. */
3236 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3237 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3238 tcg_gen_rotl_i32(to
, t1
, t2
);
/* Zero-extend the rotated word into the 64-bit output. */
3239 tcg_gen_extu_i32_i64(o
->out
, to
);
3240 tcg_temp_free_i32(t1
);
3241 tcg_temp_free_i32(t2
);
3242 tcg_temp_free_i32(to
);
3246 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3248 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3252 #ifndef CONFIG_USER_ONLY
3253 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3255 check_privileged(s
);
3256 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3261 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3263 check_privileged(s
);
3264 gen_helper_sacf(cpu_env
, o
->in2
);
3265 /* Addressing mode has changed, so end the block. */
3266 return EXIT_PC_STALE
;
3270 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3272 int sam
= s
->insn
->data
;
3288 /* Bizarre but true, we check the address of the current insn for the
3289 specification exception, not the next to be executed. Thus the PoO
3290 documents that Bad Things Happen two bytes before the end. */
3291 if (s
->pc
& ~mask
) {
3292 gen_program_exception(s
, PGM_SPECIFICATION
);
3293 return EXIT_NORETURN
;
3297 tsam
= tcg_const_i64(sam
);
3298 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3299 tcg_temp_free_i64(tsam
);
3301 /* Always exit the TB, since we (may have) changed execution mode. */
3302 return EXIT_PC_STALE
;
3305 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3307 int r1
= get_field(s
->fields
, r1
);
3308 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3312 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3314 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3318 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3320 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3324 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3326 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3327 return_low128(o
->out2
);
3331 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3333 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3337 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3339 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3343 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3345 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3346 return_low128(o
->out2
);
3350 #ifndef CONFIG_USER_ONLY
3351 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3353 check_privileged(s
);
3354 potential_page_fault(s
);
3355 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3360 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3362 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3363 check_privileged(s
);
3364 potential_page_fault(s
);
3365 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3366 tcg_temp_free_i32(r1
);
3371 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3378 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3380 /* We want to store when the condition is fulfilled, so branch
3381 out when it's not */
3382 c
.cond
= tcg_invert_cond(c
.cond
);
3384 lab
= gen_new_label();
3386 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3388 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3392 r1
= get_field(s
->fields
, r1
);
3393 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3394 if (s
->insn
->data
) {
3395 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3397 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3399 tcg_temp_free_i64(a
);
/* SLA/SLAG (SHIFT LEFT single arithmetic): insn->data is the sign-bit
 * position (31 for the 32-bit form, 63 for 64-bit), which also selects
 * the CC computation variant. The CC is latched from the pre-shift
 * operands, since the shift below clobbers in1. */
3405 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3407 uint64_t sign
= 1ull << s
->insn
->data
;
3408 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
/* Record operands for lazy CC evaluation before mutating them. */
3409 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3410 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3411 /* The arithmetic left shift is curious in that it does not affect
3412 the sign bit. Copy that over from the source unchanged. */
3413 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3414 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3415 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3419 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3421 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3425 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3427 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3431 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3433 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3437 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3439 gen_helper_sfpc(cpu_env
, o
->in2
);
3443 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3445 gen_helper_sfas(cpu_env
, o
->in2
);
3449 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3451 int b2
= get_field(s
->fields
, b2
);
3452 int d2
= get_field(s
->fields
, d2
);
3453 TCGv_i64 t1
= tcg_temp_new_i64();
3454 TCGv_i64 t2
= tcg_temp_new_i64();
3457 switch (s
->fields
->op2
) {
3458 case 0x99: /* SRNM */
3461 case 0xb8: /* SRNMB */
3464 case 0xb9: /* SRNMT */
3470 mask
= (1 << len
) - 1;
3472 /* Insert the value into the appropriate field of the FPC. */
3474 tcg_gen_movi_i64(t1
, d2
& mask
);
3476 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3477 tcg_gen_andi_i64(t1
, t1
, mask
);
3479 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3480 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3481 tcg_temp_free_i64(t1
);
3483 /* Then install the new FPC to set the rounding mode in fpu_status. */
3484 gen_helper_sfpc(cpu_env
, t2
);
3485 tcg_temp_free_i64(t2
);
3489 #ifndef CONFIG_USER_ONLY
3490 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3492 check_privileged(s
);
3493 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3494 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3498 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3500 check_privileged(s
);
3501 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3505 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3507 check_privileged(s
);
3508 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3512 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3514 check_privileged(s
);
3515 /* ??? Surely cpu address != cpu number. In any case the previous
3516 version of this stored more than the required half-word, so it
3517 is unlikely this has ever been tested. */
3518 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3522 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3524 gen_helper_stck(o
->out
, cpu_env
);
3525 /* ??? We don't implement clock states. */
3526 gen_op_movi_cc(s
, 0);
3530 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3532 TCGv_i64 c1
= tcg_temp_new_i64();
3533 TCGv_i64 c2
= tcg_temp_new_i64();
3534 gen_helper_stck(c1
, cpu_env
);
3535 /* Shift the 64-bit value into its place as a zero-extended
3536 104-bit value. Note that "bit positions 64-103 are always
3537 non-zero so that they compare differently to STCK"; we set
3538 the least significant bit to 1. */
3539 tcg_gen_shli_i64(c2
, c1
, 56);
3540 tcg_gen_shri_i64(c1
, c1
, 8);
3541 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3542 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3543 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3544 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3545 tcg_temp_free_i64(c1
);
3546 tcg_temp_free_i64(c2
);
3547 /* ??? We don't implement clock states. */
3548 gen_op_movi_cc(s
, 0);
3552 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3554 check_privileged(s
);
3555 gen_helper_sckc(cpu_env
, o
->in2
);
3559 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3561 check_privileged(s
);
3562 gen_helper_stckc(o
->out
, cpu_env
);
3566 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3568 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3569 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3570 check_privileged(s
);
3571 potential_page_fault(s
);
3572 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3573 tcg_temp_free_i32(r1
);
3574 tcg_temp_free_i32(r3
);
3578 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3580 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3581 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3582 check_privileged(s
);
3583 potential_page_fault(s
);
3584 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3585 tcg_temp_free_i32(r1
);
3586 tcg_temp_free_i32(r3
);
3590 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3592 TCGv_i64 t1
= tcg_temp_new_i64();
3594 check_privileged(s
);
3595 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3596 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3597 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3598 tcg_temp_free_i64(t1
);
3603 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3605 check_privileged(s
);
3606 gen_helper_spt(cpu_env
, o
->in2
);
3610 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3613 /* We really ought to have more complete indication of facilities
3614 that we implement. Address this when STFLE is implemented. */
3615 check_privileged(s
);
3616 f
= tcg_const_i64(0xc0000000);
3617 a
= tcg_const_i64(200);
3618 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3619 tcg_temp_free_i64(f
);
3620 tcg_temp_free_i64(a
);
3624 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3626 check_privileged(s
);
3627 gen_helper_stpt(o
->out
, cpu_env
);
3631 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3633 check_privileged(s
);
3634 potential_page_fault(s
);
3635 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3640 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3642 check_privileged(s
);
3643 gen_helper_spx(cpu_env
, o
->in2
);
3647 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3649 check_privileged(s
);
3650 potential_page_fault(s
);
3651 gen_helper_xsch(cpu_env
, regs
[1]);
3656 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3658 check_privileged(s
);
3659 potential_page_fault(s
);
3660 gen_helper_csch(cpu_env
, regs
[1]);
3665 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3667 check_privileged(s
);
3668 potential_page_fault(s
);
3669 gen_helper_hsch(cpu_env
, regs
[1]);
3674 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3676 check_privileged(s
);
3677 potential_page_fault(s
);
3678 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3683 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3685 check_privileged(s
);
3686 potential_page_fault(s
);
3687 gen_helper_rchp(cpu_env
, regs
[1]);
3692 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3694 check_privileged(s
);
3695 potential_page_fault(s
);
3696 gen_helper_rsch(cpu_env
, regs
[1]);
3701 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3703 check_privileged(s
);
3704 potential_page_fault(s
);
3705 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
3710 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
3712 check_privileged(s
);
3713 potential_page_fault(s
);
3714 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
3719 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
3721 check_privileged(s
);
3722 potential_page_fault(s
);
3723 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
3728 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
3730 check_privileged(s
);
3731 potential_page_fault(s
);
3732 gen_helper_chsc(cpu_env
, o
->in2
);
3737 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3739 check_privileged(s
);
3740 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3741 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3745 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3747 uint64_t i2
= get_field(s
->fields
, i2
);
3750 check_privileged(s
);
3752 /* It is important to do what the instruction name says: STORE THEN.
3753 If we let the output hook perform the store then if we fault and
3754 restart, we'll have the wrong SYSTEM MASK in place. */
3755 t
= tcg_temp_new_i64();
3756 tcg_gen_shri_i64(t
, psw_mask
, 56);
3757 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3758 tcg_temp_free_i64(t
);
3760 if (s
->fields
->op
== 0xac) {
3761 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3762 (i2
<< 56) | 0x00ffffffffffffffull
);
3764 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3769 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3771 check_privileged(s
);
3772 potential_page_fault(s
);
3773 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3777 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3779 check_privileged(s
);
3780 potential_page_fault(s
);
3781 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3786 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3788 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3792 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3794 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3798 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3800 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3804 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3806 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3810 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3812 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3813 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3814 potential_page_fault(s
);
3815 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3816 tcg_temp_free_i32(r1
);
3817 tcg_temp_free_i32(r3
);
3821 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3823 int m3
= get_field(s
->fields
, m3
);
3824 int pos
, base
= s
->insn
->data
;
3825 TCGv_i64 tmp
= tcg_temp_new_i64();
3827 pos
= base
+ ctz32(m3
) * 8;
3830 /* Effectively a 32-bit store. */
3831 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3832 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3838 /* Effectively a 16-bit store. */
3839 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3840 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3847 /* Effectively an 8-bit store. */
3848 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3849 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3853 /* This is going to be a sequence of shifts and stores. */
3854 pos
= base
+ 32 - 8;
3857 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3858 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3859 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3861 m3
= (m3
<< 1) & 0xf;
3866 tcg_temp_free_i64(tmp
);
3870 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3872 int r1
= get_field(s
->fields
, r1
);
3873 int r3
= get_field(s
->fields
, r3
);
3874 int size
= s
->insn
->data
;
3875 TCGv_i64 tsize
= tcg_const_i64(size
);
3879 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3881 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3886 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3890 tcg_temp_free_i64(tsize
);
3894 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3896 int r1
= get_field(s
->fields
, r1
);
3897 int r3
= get_field(s
->fields
, r3
);
3898 TCGv_i64 t
= tcg_temp_new_i64();
3899 TCGv_i64 t4
= tcg_const_i64(4);
3900 TCGv_i64 t32
= tcg_const_i64(32);
3903 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3904 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3908 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3912 tcg_temp_free_i64(t
);
3913 tcg_temp_free_i64(t4
);
3914 tcg_temp_free_i64(t32
);
3918 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3920 potential_page_fault(s
);
3921 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3923 return_low128(o
->in2
);
3927 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3929 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
/* SUBTRACT with borrow (SLB/SLBG family): out = in1 - in2 - borrow, where
 * the borrow is recovered from the current (lazy) condition code.
 * NOTE(review): gaps in the embedded numbering (3933 -> 3938, 3945 -> 3947,
 * 3950 -> 3954) show dropped lines — presumably "{", the "DisasCompare
 * cmp; TCGv_i64 borrow;" declarations, and the if/else brackets selecting
 * the 64-bit vs. 32-bit setcond path. */
3933 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
/* First the plain difference. */
3938 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3940 /* The !borrow flag is the msb of CC. Since we want the inverse of
3941 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3942 disas_jcc(s
, &cmp
, 8 | 4);
3943 borrow
= tcg_temp_new_i64();
/* 64-bit comparison path: materialize borrow directly... */
3945 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
/* ...32-bit comparison path: setcond in i32, then widen. */
3947 TCGv_i32 t
= tcg_temp_new_i32();
3948 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3949 tcg_gen_extu_i32_i64(borrow
, t
);
3950 tcg_temp_free_i32(t
);
/* Fold the borrow into the result. */
3954 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3955 tcg_temp_free_i64(borrow
);
3959 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3966 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3967 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3968 tcg_temp_free_i32(t
);
3970 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3971 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3972 tcg_temp_free_i32(t
);
3974 gen_exception(EXCP_SVC
);
3975 return EXIT_NORETURN
;
3978 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3980 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3985 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3987 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3992 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3994 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3999 #ifndef CONFIG_USER_ONLY
4000 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4002 potential_page_fault(s
);
4003 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4009 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4011 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4012 potential_page_fault(s
);
4013 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4014 tcg_temp_free_i32(l
);
4019 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4021 potential_page_fault(s
);
4022 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4023 return_low128(o
->out2
);
4028 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4030 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4031 potential_page_fault(s
);
4032 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4033 tcg_temp_free_i32(l
);
4038 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4040 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4041 potential_page_fault(s
);
4042 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4043 tcg_temp_free_i32(l
);
4047 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4049 int d1
= get_field(s
->fields
, d1
);
4050 int d2
= get_field(s
->fields
, d2
);
4051 int b1
= get_field(s
->fields
, b1
);
4052 int b2
= get_field(s
->fields
, b2
);
4053 int l
= get_field(s
->fields
, l1
);
4056 o
->addr1
= get_address(s
, 0, b1
, d1
);
4058 /* If the addresses are identical, this is a store/memset of zero. */
4059 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4060 o
->in2
= tcg_const_i64(0);
4064 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4067 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4071 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4074 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4078 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4081 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4085 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4087 gen_op_movi_cc(s
, 0);
4091 /* But in general we'll defer to a helper. */
4092 o
->in2
= get_address(s
, 0, b2
, d2
);
4093 t32
= tcg_const_i32(l
);
4094 potential_page_fault(s
);
4095 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4096 tcg_temp_free_i32(t32
);
4101 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4103 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4107 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4109 int shift
= s
->insn
->data
& 0xff;
4110 int size
= s
->insn
->data
>> 8;
4111 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4114 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4115 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4117 /* Produce the CC from only the bits manipulated. */
4118 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4119 set_cc_nz_u64(s
, cc_dst
);
4123 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4125 o
->out
= tcg_const_i64(0);
4129 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4131 o
->out
= tcg_const_i64(0);
4137 /* ====================================================================== */
4138 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4139 the original inputs), update the various cc data structures in order to
4140 be able to compute the new condition code. */
4142 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4144 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4147 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4149 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4152 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4154 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4157 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4159 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4162 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4164 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4167 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4169 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4172 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4174 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4177 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4179 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4182 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4184 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4187 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4189 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4192 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4194 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4197 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4199 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4202 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4204 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4207 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4209 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4212 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4214 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4217 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4219 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4222 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4224 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4227 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4229 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4232 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4234 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4237 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4239 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4240 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4243 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4245 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4248 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4250 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4253 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4255 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4258 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4260 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4263 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4265 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4268 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4270 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4273 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4275 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4278 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4280 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4283 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4285 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4288 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4290 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4293 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4295 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4298 /* ====================================================================== */
4299 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4300 with the TCG register to which we will write. Used in combination with
4301 the "wout" generators, in some cases we need a new temporary, and in
4302 some cases we can write to a TCG global. */
4304 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4306 o
->out
= tcg_temp_new_i64();
4308 #define SPEC_prep_new 0
4310 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4312 o
->out
= tcg_temp_new_i64();
4313 o
->out2
= tcg_temp_new_i64();
4315 #define SPEC_prep_new_P 0
4317 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4319 o
->out
= regs
[get_field(f
, r1
)];
4322 #define SPEC_prep_r1 0
4324 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4326 int r1
= get_field(f
, r1
);
4328 o
->out2
= regs
[r1
+ 1];
4329 o
->g_out
= o
->g_out2
= true;
4331 #define SPEC_prep_r1_P SPEC_r1_even
4333 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4335 o
->out
= fregs
[get_field(f
, r1
)];
4338 #define SPEC_prep_f1 0
4340 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4342 int r1
= get_field(f
, r1
);
4344 o
->out2
= fregs
[r1
+ 2];
4345 o
->g_out
= o
->g_out2
= true;
4347 #define SPEC_prep_x1 SPEC_r1_f128
4349 /* ====================================================================== */
4350 /* The "Write OUTput" generators. These generally perform some non-trivial
4351 copy of data to TCG globals, or to main memory. The trivial cases are
4352 generally handled by having a "prep" generator install the TCG global
4353 as the destination of the operation. */
4355 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4357 store_reg(get_field(f
, r1
), o
->out
);
4359 #define SPEC_wout_r1 0
4361 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4363 int r1
= get_field(f
, r1
);
4364 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4366 #define SPEC_wout_r1_8 0
4368 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4370 int r1
= get_field(f
, r1
);
4371 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4373 #define SPEC_wout_r1_16 0
4375 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4377 store_reg32_i64(get_field(f
, r1
), o
->out
);
4379 #define SPEC_wout_r1_32 0
4381 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4383 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4385 #define SPEC_wout_r1_32h 0
4387 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4389 int r1
= get_field(f
, r1
);
4390 store_reg32_i64(r1
, o
->out
);
4391 store_reg32_i64(r1
+ 1, o
->out2
);
4393 #define SPEC_wout_r1_P32 SPEC_r1_even
4395 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4397 int r1
= get_field(f
, r1
);
4398 store_reg32_i64(r1
+ 1, o
->out
);
4399 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4400 store_reg32_i64(r1
, o
->out
);
4402 #define SPEC_wout_r1_D32 SPEC_r1_even
4404 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4406 store_freg32_i64(get_field(f
, r1
), o
->out
);
4408 #define SPEC_wout_e1 0
4410 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4412 store_freg(get_field(f
, r1
), o
->out
);
4414 #define SPEC_wout_f1 0
4416 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4418 int f1
= get_field(s
->fields
, r1
);
4419 store_freg(f1
, o
->out
);
4420 store_freg(f1
+ 2, o
->out2
);
4422 #define SPEC_wout_x1 SPEC_r1_f128
4424 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4426 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4427 store_reg32_i64(get_field(f
, r1
), o
->out
);
4430 #define SPEC_wout_cond_r1r2_32 0
4432 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4434 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4435 store_freg32_i64(get_field(f
, r1
), o
->out
);
4438 #define SPEC_wout_cond_e1e2 0
4440 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4442 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4444 #define SPEC_wout_m1_8 0
4446 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4448 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4450 #define SPEC_wout_m1_16 0
4452 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4454 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4456 #define SPEC_wout_m1_32 0
4458 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4460 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4462 #define SPEC_wout_m1_64 0
4464 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4466 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4468 #define SPEC_wout_m2_32 0
4470 static void wout_m2_32_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4472 /* XXX release reservation */
4473 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4474 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4476 #define SPEC_wout_m2_32_r1_atomic 0
4478 static void wout_m2_64_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4480 /* XXX release reservation */
4481 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4482 store_reg(get_field(f
, r1
), o
->in2
);
4484 #define SPEC_wout_m2_64_r1_atomic 0
4486 /* ====================================================================== */
4487 /* The "INput 1" generators. These load the first operand to an insn. */
4489 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4491 o
->in1
= load_reg(get_field(f
, r1
));
4493 #define SPEC_in1_r1 0
4495 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4497 o
->in1
= regs
[get_field(f
, r1
)];
4500 #define SPEC_in1_r1_o 0
4502 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4504 o
->in1
= tcg_temp_new_i64();
4505 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4507 #define SPEC_in1_r1_32s 0
4509 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4511 o
->in1
= tcg_temp_new_i64();
4512 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4514 #define SPEC_in1_r1_32u 0
4516 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4518 o
->in1
= tcg_temp_new_i64();
4519 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4521 #define SPEC_in1_r1_sr32 0
4523 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4525 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4527 #define SPEC_in1_r1p1 SPEC_r1_even
4529 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4531 o
->in1
= tcg_temp_new_i64();
4532 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4534 #define SPEC_in1_r1p1_32s SPEC_r1_even
4536 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4538 o
->in1
= tcg_temp_new_i64();
4539 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4541 #define SPEC_in1_r1p1_32u SPEC_r1_even
4543 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4545 int r1
= get_field(f
, r1
);
4546 o
->in1
= tcg_temp_new_i64();
4547 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4549 #define SPEC_in1_r1_D32 SPEC_r1_even
4551 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4553 o
->in1
= load_reg(get_field(f
, r2
));
4555 #define SPEC_in1_r2 0
4557 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4559 o
->in1
= tcg_temp_new_i64();
4560 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
4562 #define SPEC_in1_r2_sr32 0
4564 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4566 o
->in1
= load_reg(get_field(f
, r3
));
4568 #define SPEC_in1_r3 0
4570 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4572 o
->in1
= regs
[get_field(f
, r3
)];
4575 #define SPEC_in1_r3_o 0
4577 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4579 o
->in1
= tcg_temp_new_i64();
4580 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4582 #define SPEC_in1_r3_32s 0
4584 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4586 o
->in1
= tcg_temp_new_i64();
4587 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4589 #define SPEC_in1_r3_32u 0
4591 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4593 int r3
= get_field(f
, r3
);
4594 o
->in1
= tcg_temp_new_i64();
4595 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
4597 #define SPEC_in1_r3_D32 SPEC_r3_even
4599 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4601 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4603 #define SPEC_in1_e1 0
4605 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4607 o
->in1
= fregs
[get_field(f
, r1
)];
4610 #define SPEC_in1_f1_o 0
4612 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4614 int r1
= get_field(f
, r1
);
4616 o
->out2
= fregs
[r1
+ 2];
4617 o
->g_out
= o
->g_out2
= true;
4619 #define SPEC_in1_x1_o SPEC_r1_f128
4621 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4623 o
->in1
= fregs
[get_field(f
, r3
)];
4626 #define SPEC_in1_f3_o 0
4628 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4630 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4632 #define SPEC_in1_la1 0
4634 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4636 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4637 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4639 #define SPEC_in1_la2 0
4641 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4644 o
->in1
= tcg_temp_new_i64();
4645 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4647 #define SPEC_in1_m1_8u 0
4649 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4652 o
->in1
= tcg_temp_new_i64();
4653 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4655 #define SPEC_in1_m1_16s 0
4657 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4660 o
->in1
= tcg_temp_new_i64();
4661 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4663 #define SPEC_in1_m1_16u 0
4665 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4668 o
->in1
= tcg_temp_new_i64();
4669 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4671 #define SPEC_in1_m1_32s 0
4673 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4676 o
->in1
= tcg_temp_new_i64();
4677 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4679 #define SPEC_in1_m1_32u 0
4681 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4684 o
->in1
= tcg_temp_new_i64();
4685 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4687 #define SPEC_in1_m1_64 0
4689 /* ====================================================================== */
4690 /* The "INput 2" generators. These load the second operand to an insn. */
4692 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4694 o
->in2
= regs
[get_field(f
, r1
)];
4697 #define SPEC_in2_r1_o 0
4699 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4701 o
->in2
= tcg_temp_new_i64();
4702 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4704 #define SPEC_in2_r1_16u 0
4706 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4708 o
->in2
= tcg_temp_new_i64();
4709 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4711 #define SPEC_in2_r1_32u 0
4713 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4715 int r1
= get_field(f
, r1
);
4716 o
->in2
= tcg_temp_new_i64();
4717 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
4719 #define SPEC_in2_r1_D32 SPEC_r1_even
4721 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4723 o
->in2
= load_reg(get_field(f
, r2
));
4725 #define SPEC_in2_r2 0
4727 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4729 o
->in2
= regs
[get_field(f
, r2
)];
4732 #define SPEC_in2_r2_o 0
4734 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4736 int r2
= get_field(f
, r2
);
4738 o
->in2
= load_reg(r2
);
4741 #define SPEC_in2_r2_nz 0
4743 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4745 o
->in2
= tcg_temp_new_i64();
4746 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4748 #define SPEC_in2_r2_8s 0
4750 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4752 o
->in2
= tcg_temp_new_i64();
4753 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4755 #define SPEC_in2_r2_8u 0
4757 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4759 o
->in2
= tcg_temp_new_i64();
4760 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4762 #define SPEC_in2_r2_16s 0
4764 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4766 o
->in2
= tcg_temp_new_i64();
4767 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4769 #define SPEC_in2_r2_16u 0
4771 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4773 o
->in2
= load_reg(get_field(f
, r3
));
4775 #define SPEC_in2_r3 0
4777 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4779 o
->in2
= tcg_temp_new_i64();
4780 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
4782 #define SPEC_in2_r3_sr32 0
4784 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4786 o
->in2
= tcg_temp_new_i64();
4787 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4789 #define SPEC_in2_r2_32s 0
4791 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4793 o
->in2
= tcg_temp_new_i64();
4794 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4796 #define SPEC_in2_r2_32u 0
4798 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4800 o
->in2
= tcg_temp_new_i64();
4801 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
4803 #define SPEC_in2_r2_sr32 0
4805 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4807 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4809 #define SPEC_in2_e2 0
4811 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4813 o
->in2
= fregs
[get_field(f
, r2
)];
4816 #define SPEC_in2_f2_o 0
4818 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4820 int r2
= get_field(f
, r2
);
4822 o
->in2
= fregs
[r2
+ 2];
4823 o
->g_in1
= o
->g_in2
= true;
4825 #define SPEC_in2_x2_o SPEC_r2_f128
4827 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4829 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
4831 #define SPEC_in2_ra2 0
4833 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4835 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4836 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4838 #define SPEC_in2_a2 0
4840 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4842 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
4844 #define SPEC_in2_ri2 0
4846 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4848 help_l2_shift(s
, f
, o
, 31);
4850 #define SPEC_in2_sh32 0
4852 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4854 help_l2_shift(s
, f
, o
, 63);
4856 #define SPEC_in2_sh64 0
4858 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4861 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4863 #define SPEC_in2_m2_8u 0
4865 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4868 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4870 #define SPEC_in2_m2_16s 0
4872 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4875 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4877 #define SPEC_in2_m2_16u 0
4879 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4882 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4884 #define SPEC_in2_m2_32s 0
4886 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4889 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4891 #define SPEC_in2_m2_32u 0
4893 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4896 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4898 #define SPEC_in2_m2_64 0
4900 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4903 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4905 #define SPEC_in2_mri2_16u 0
4907 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4910 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4912 #define SPEC_in2_mri2_32s 0
4914 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4917 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4919 #define SPEC_in2_mri2_32u 0
4921 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4924 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4926 #define SPEC_in2_mri2_64 0
4928 static void in2_m2_32s_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4930 /* XXX should reserve the address */
4932 o
->in2
= tcg_temp_new_i64();
4933 tcg_gen_qemu_ld32s(o
->in2
, o
->addr1
, get_mem_index(s
));
4935 #define SPEC_in2_m2_32s_atomic 0
4937 static void in2_m2_64_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4939 /* XXX should reserve the address */
4941 o
->in2
= tcg_temp_new_i64();
4942 tcg_gen_qemu_ld64(o
->in2
, o
->addr1
, get_mem_index(s
));
4944 #define SPEC_in2_m2_64_atomic 0
4946 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4948 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4950 #define SPEC_in2_i2 0
4952 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4954 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4956 #define SPEC_in2_i2_8u 0
4958 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4960 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4962 #define SPEC_in2_i2_16u 0
4964 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4966 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4968 #define SPEC_in2_i2_32u 0
4970 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4972 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4973 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4975 #define SPEC_in2_i2_16u_shl 0
4977 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4979 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4980 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4982 #define SPEC_in2_i2_32u_shl 0
4984 #ifndef CONFIG_USER_ONLY
4985 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4987 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
4989 #define SPEC_in2_insn 0
4992 /* ====================================================================== */
4994 /* Find opc within the table of insns. This is formulated as a switch
4995 statement so that (1) we get compile-time notice of cut-paste errors
4996 for duplicated opcodes, and (2) the compiler generates the binary
4997 search tree, rather than us having to post-process the table. */
4999 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5000 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5002 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5004 enum DisasInsnEnum
{
5005 #include "insn-data.def"
5009 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5013 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5015 .help_in1 = in1_##I1, \
5016 .help_in2 = in2_##I2, \
5017 .help_prep = prep_##P, \
5018 .help_wout = wout_##W, \
5019 .help_cout = cout_##CC, \
5020 .help_op = op_##OP, \
5024 /* Allow 0 to be used for NULL in the table below. */
5032 #define SPEC_in1_0 0
5033 #define SPEC_in2_0 0
5034 #define SPEC_prep_0 0
5035 #define SPEC_wout_0 0
5037 static const DisasInsn insn_info
[] = {
5038 #include "insn-data.def"
5042 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5043 case OPC: return &insn_info[insn_ ## NM];
5045 static const DisasInsn
*lookup_opc(uint16_t opc
)
5048 #include "insn-data.def"
5057 /* Extract a field from the insn. The INSN should be left-aligned in
5058 the uint64_t so that we can more easily utilize the big-bit-endian
5059 definitions we extract from the Principals of Operation. */
5061 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5069 /* Zero extract the field from the insn. */
5070 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5072 /* Sign-extend, or un-swap the field as necessary. */
5074 case 0: /* unsigned */
5076 case 1: /* signed */
5077 assert(f
->size
<= 32);
5078 m
= 1u << (f
->size
- 1);
5081 case 2: /* dl+dh split, signed 20 bit. */
5082 r
= ((int8_t)r
<< 12) | (r
>> 8);
5088 /* Validate that the "compressed" encoding we selected above is valid.
5089 I.e. we havn't make two different original fields overlap. */
5090 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5091 o
->presentC
|= 1 << f
->indexC
;
5092 o
->presentO
|= 1 << f
->indexO
;
5094 o
->c
[f
->indexC
] = r
;
5097 /* Lookup the insn at the current PC, extracting the operands into O and
5098 returning the info struct for the insn. Returns NULL for invalid insn. */
5100 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5103 uint64_t insn
, pc
= s
->pc
;
5105 const DisasInsn
*info
;
5107 insn
= ld_code2(env
, pc
);
5108 op
= (insn
>> 8) & 0xff;
5109 ilen
= get_ilen(op
);
5110 s
->next_pc
= s
->pc
+ ilen
;
5117 insn
= ld_code4(env
, pc
) << 32;
5120 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5126 /* We can't actually determine the insn format until we've looked up
5127 the full insn opcode. Which we can't do without locating the
5128 secondary opcode. Assume by default that OP2 is at bit 40; for
5129 those smaller insns that don't actually have a secondary opcode
5130 this will correctly result in OP2 = 0. */
5136 case 0xb2: /* S, RRF, RRE */
5137 case 0xb3: /* RRE, RRD, RRF */
5138 case 0xb9: /* RRE, RRF */
5139 case 0xe5: /* SSE, SIL */
5140 op2
= (insn
<< 8) >> 56;
5144 case 0xc0: /* RIL */
5145 case 0xc2: /* RIL */
5146 case 0xc4: /* RIL */
5147 case 0xc6: /* RIL */
5148 case 0xc8: /* SSF */
5149 case 0xcc: /* RIL */
5150 op2
= (insn
<< 12) >> 60;
5152 case 0xd0 ... 0xdf: /* SS */
5158 case 0xee ... 0xf3: /* SS */
5159 case 0xf8 ... 0xfd: /* SS */
5163 op2
= (insn
<< 40) >> 56;
5167 memset(f
, 0, sizeof(*f
));
5172 /* Lookup the instruction. */
5173 info
= lookup_opc(op
<< 8 | op2
);
5175 /* If we found it, extract the operands. */
5177 DisasFormat fmt
= info
->fmt
;
5180 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5181 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5187 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5189 const DisasInsn
*insn
;
5190 ExitStatus ret
= NO_EXIT
;
5194 /* Search for the insn in the table. */
5195 insn
= extract_insn(env
, s
, &f
);
5197 /* Not found means unimplemented/illegal opcode. */
5199 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5201 gen_illegal_opcode(s
);
5202 return EXIT_NORETURN
;
5205 #ifndef CONFIG_USER_ONLY
5206 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5207 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5208 gen_helper_per_ifetch(cpu_env
, addr
);
5209 tcg_temp_free_i64(addr
);
5213 /* Check for insn specification exceptions. */
5215 int spec
= insn
->spec
, excp
= 0, r
;
5217 if (spec
& SPEC_r1_even
) {
5218 r
= get_field(&f
, r1
);
5220 excp
= PGM_SPECIFICATION
;
5223 if (spec
& SPEC_r2_even
) {
5224 r
= get_field(&f
, r2
);
5226 excp
= PGM_SPECIFICATION
;
5229 if (spec
& SPEC_r3_even
) {
5230 r
= get_field(&f
, r3
);
5232 excp
= PGM_SPECIFICATION
;
5235 if (spec
& SPEC_r1_f128
) {
5236 r
= get_field(&f
, r1
);
5238 excp
= PGM_SPECIFICATION
;
5241 if (spec
& SPEC_r2_f128
) {
5242 r
= get_field(&f
, r2
);
5244 excp
= PGM_SPECIFICATION
;
5248 gen_program_exception(s
, excp
);
5249 return EXIT_NORETURN
;
5253 /* Set up the strutures we use to communicate with the helpers. */
5256 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5257 TCGV_UNUSED_I64(o
.out
);
5258 TCGV_UNUSED_I64(o
.out2
);
5259 TCGV_UNUSED_I64(o
.in1
);
5260 TCGV_UNUSED_I64(o
.in2
);
5261 TCGV_UNUSED_I64(o
.addr1
);
5263 /* Implement the instruction. */
5264 if (insn
->help_in1
) {
5265 insn
->help_in1(s
, &f
, &o
);
5267 if (insn
->help_in2
) {
5268 insn
->help_in2(s
, &f
, &o
);
5270 if (insn
->help_prep
) {
5271 insn
->help_prep(s
, &f
, &o
);
5273 if (insn
->help_op
) {
5274 ret
= insn
->help_op(s
, &o
);
5276 if (insn
->help_wout
) {
5277 insn
->help_wout(s
, &f
, &o
);
5279 if (insn
->help_cout
) {
5280 insn
->help_cout(s
, &o
);
5283 /* Free any temporaries created by the helpers. */
5284 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5285 tcg_temp_free_i64(o
.out
);
5287 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5288 tcg_temp_free_i64(o
.out2
);
5290 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5291 tcg_temp_free_i64(o
.in1
);
5293 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5294 tcg_temp_free_i64(o
.in2
);
5296 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5297 tcg_temp_free_i64(o
.addr1
);
5300 #ifndef CONFIG_USER_ONLY
5301 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5302 /* An exception might be triggered, save PSW if not already done. */
5303 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5304 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5310 /* Call the helper to check for a possible PER exception. */
5311 gen_helper_per_check_exception(cpu_env
);
5315 /* Advance to the next instruction. */
5320 void gen_intermediate_code(CPUS390XState
*env
, struct TranslationBlock
*tb
)
5322 S390CPU
*cpu
= s390_env_get_cpu(env
);
5323 CPUState
*cs
= CPU(cpu
);
5325 target_ulong pc_start
;
5326 uint64_t next_page_start
;
5327 int num_insns
, max_insns
;
5334 if (!(tb
->flags
& FLAG_MASK_64
)) {
5335 pc_start
&= 0x7fffffff;
5340 dc
.cc_op
= CC_OP_DYNAMIC
;
5341 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5343 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5346 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5347 if (max_insns
== 0) {
5348 max_insns
= CF_COUNT_MASK
;
5350 if (max_insns
> TCG_MAX_INSNS
) {
5351 max_insns
= TCG_MAX_INSNS
;
5357 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5360 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5361 status
= EXIT_PC_STALE
;
5363 /* The address covered by the breakpoint must be included in
5364 [tb->pc, tb->pc + tb->size) in order to for it to be
5365 properly cleared -- thus we increment the PC here so that
5366 the logic setting tb->size below does the right thing. */
5371 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5376 if (status
== NO_EXIT
) {
5377 status
= translate_one(env
, &dc
);
5380 /* If we reach a page boundary, are single stepping,
5381 or exhaust instruction count, stop generation. */
5382 if (status
== NO_EXIT
5383 && (dc
.pc
>= next_page_start
5384 || tcg_op_buf_full()
5385 || num_insns
>= max_insns
5387 || cs
->singlestep_enabled
)) {
5388 status
= EXIT_PC_STALE
;
5390 } while (status
== NO_EXIT
);
5392 if (tb
->cflags
& CF_LAST_IO
) {
5401 update_psw_addr(&dc
);
5403 case EXIT_PC_UPDATED
:
5404 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5405 cc op type is in env */
5407 /* Exit the TB, either by raising a debug exception or by return. */
5409 gen_exception(EXCP_DEBUG
);
5418 gen_tb_end(tb
, num_insns
);
5420 tb
->size
= dc
.pc
- pc_start
;
5421 tb
->icount
= num_insns
;
5423 #if defined(S390X_DEBUG_DISAS)
5424 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5425 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5426 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5432 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5435 int cc_op
= data
[1];
5436 env
->psw
.addr
= data
[0];
5437 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {