4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
64 bool singlestep_enabled
;
67 /* Information carried about a condition to be evaluated. */
74 struct { TCGv_i64 a
, b
; } s64
;
75 struct { TCGv_i32 a
, b
; } s32
;
81 #ifdef DEBUG_INLINE_BRANCHES
82 static uint64_t inline_branch_hit
[CC_OP_MAX
];
83 static uint64_t inline_branch_miss
[CC_OP_MAX
];
86 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
88 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
89 if (s
->tb
->flags
& FLAG_MASK_32
) {
90 return pc
| 0x80000000;
96 static TCGv_i64 psw_addr
;
97 static TCGv_i64 psw_mask
;
100 static TCGv_i32 cc_op
;
101 static TCGv_i64 cc_src
;
102 static TCGv_i64 cc_dst
;
103 static TCGv_i64 cc_vr
;
105 static char cpu_reg_names
[32][4];
106 static TCGv_i64 regs
[16];
107 static TCGv_i64 fregs
[16];
109 void s390x_translate_init(void)
113 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
114 tcg_ctx
.tcg_env
= cpu_env
;
115 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
116 offsetof(CPUS390XState
, psw
.addr
),
118 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
119 offsetof(CPUS390XState
, psw
.mask
),
121 gbea
= tcg_global_mem_new_i64(cpu_env
,
122 offsetof(CPUS390XState
, gbea
),
125 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
127 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
129 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
131 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
134 for (i
= 0; i
< 16; i
++) {
135 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
136 regs
[i
] = tcg_global_mem_new(cpu_env
,
137 offsetof(CPUS390XState
, regs
[i
]),
141 for (i
= 0; i
< 16; i
++) {
142 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
143 fregs
[i
] = tcg_global_mem_new(cpu_env
,
144 offsetof(CPUS390XState
, vregs
[i
][0].d
),
145 cpu_reg_names
[i
+ 16]);
149 static TCGv_i64
load_reg(int reg
)
151 TCGv_i64 r
= tcg_temp_new_i64();
152 tcg_gen_mov_i64(r
, regs
[reg
]);
156 static TCGv_i64
load_freg32_i64(int reg
)
158 TCGv_i64 r
= tcg_temp_new_i64();
159 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
/* Write the full 64-bit value V to general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
/* Write the full 64-bit value V to floating-point register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
/* Write V into the low 32 bits of general register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
/* Write V into the high 32 bits of general register REG,
   preserving the low half. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
/* Write V into the high 32 bits of floating-point register REG;
   short (32-bit) floats live in the upper half of the register. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
/* Copy the low half of a 128-bit helper result (stashed by the helper
   in env->retxl) into DEST. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
/* Synchronize the architectural PSW address with the current
   translation position (s->pc). */
static void update_psw_addr(DisasContext *s)
{
    tcg_gen_movi_i64(psw_addr, s->pc);
}
200 static void per_branch(DisasContext
*s
, bool to_next
)
202 #ifndef CONFIG_USER_ONLY
203 tcg_gen_movi_i64(gbea
, s
->pc
);
205 if (s
->tb
->flags
& FLAG_MASK_PER
) {
206 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
207 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
209 tcg_temp_free_i64(next_pc
);
215 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
216 TCGv_i64 arg1
, TCGv_i64 arg2
)
218 #ifndef CONFIG_USER_ONLY
219 if (s
->tb
->flags
& FLAG_MASK_PER
) {
220 TCGLabel
*lab
= gen_new_label();
221 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
223 tcg_gen_movi_i64(gbea
, s
->pc
);
224 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
228 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
229 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
230 tcg_temp_free_i64(pc
);
/* Record the current instruction address in the breaking-event-address
   register (gbea) — presumably for PER event reporting; confirm against
   the PER helpers. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}
/* Flush the compile-time cc_op state out to the cc_op global.
   DYNAMIC means the global is already authoritative; STATIC means the
   CC value itself is already in env->cc_op — neither needs a store. */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
247 static void potential_page_fault(DisasContext
*s
)
/* Fetch a 2-byte instruction halfword at guest address PC,
   zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
/* Fetch a 4-byte instruction word at guest address PC.  The
   intermediate cast to uint32_t avoids sign-extending the 32-bit
   load result into the upper half. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
263 static int get_mem_index(DisasContext
*s
)
265 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
266 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
268 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
270 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
/* Emit a call to the exception helper raising exception number EXCP. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
285 static void gen_program_exception(DisasContext
*s
, int code
)
/* Remember what pgm exception this was. */
290 tmp
= tcg_const_i32(code
);
291 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
292 tcg_temp_free_i32(tmp
);
294 tmp
= tcg_const_i32(s
->ilen
);
295 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
296 tcg_temp_free_i32(tmp
);
304 /* Trigger exception. */
305 gen_exception(EXCP_PGM
);
/* Raise a program exception for an illegal/unsupported opcode. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
313 static inline void gen_trap(DisasContext
*s
)
317 /* Set DXC to 0xff. */
318 t
= tcg_temp_new_i32();
319 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
320 tcg_gen_ori_i32(t
, t
, 0xff00);
321 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
322 tcg_temp_free_i32(t
);
324 gen_program_exception(s
, PGM_DATA
);
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation program exception when translating in
   problem state (PSW problem-state bit set in the tb flags). */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & FLAG_MASK_PSTATE) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
336 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
338 TCGv_i64 tmp
= tcg_temp_new_i64();
339 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
341 /* Note that d2 is limited to 20 bits, signed. If we crop negative
displacements early we create larger immediate addends. */
344 /* Note that addi optimizes the imm==0 case. */
346 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
347 tcg_gen_addi_i64(tmp
, tmp
, d2
);
349 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
351 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
357 tcg_gen_movi_i64(tmp
, d2
);
360 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
366 static inline bool live_cc_data(DisasContext
*s
)
368 return (s
->cc_op
!= CC_OP_DYNAMIC
369 && s
->cc_op
!= CC_OP_STATIC
/* Set the condition code to the constant VAL.  Any live cc computation
   inputs are discarded first so TCG can drop the dead stores. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    /* CC_OP_CONST0..CONST3 encode the constant directly in the op. */
    s->cc_op = CC_OP_CONST0 + val;
}
383 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
385 if (live_cc_data(s
)) {
386 tcg_gen_discard_i64(cc_src
);
387 tcg_gen_discard_i64(cc_vr
);
389 tcg_gen_mov_i64(cc_dst
, dst
);
393 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
396 if (live_cc_data(s
)) {
397 tcg_gen_discard_i64(cc_vr
);
399 tcg_gen_mov_i64(cc_src
, src
);
400 tcg_gen_mov_i64(cc_dst
, dst
);
404 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
405 TCGv_i64 dst
, TCGv_i64 vr
)
407 tcg_gen_mov_i64(cc_src
, src
);
408 tcg_gen_mov_i64(cc_dst
, dst
);
409 tcg_gen_mov_i64(cc_vr
, vr
);
/* Set the CC from a 64-bit value using the nonzero comparison op. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
/* Set the CC from a 32-bit float result via the CC_OP_NZ_F32 op. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
/* Set the CC from a 64-bit float result via the CC_OP_NZ_F64 op. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
/* Set the CC from a 128-bit float result (high/low halves VH/VL)
   via the CC_OP_NZ_F128 op. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
/* CC value is in env->cc_op */
/* Mark the CC as already materialized in env->cc_op (e.g. after a
   helper computed it), discarding any stale computation inputs. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
444 /* calculates cc into cc_op */
445 static void gen_op_calc_cc(DisasContext
*s
)
447 TCGv_i32 local_cc_op
;
450 TCGV_UNUSED_I32(local_cc_op
);
451 TCGV_UNUSED_I64(dummy
);
454 dummy
= tcg_const_i64(0);
468 local_cc_op
= tcg_const_i32(s
->cc_op
);
484 /* s->cc_op is the cc value */
485 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
488 /* env->cc_op already is the cc value */
503 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
508 case CC_OP_LTUGTU_32
:
509 case CC_OP_LTUGTU_64
:
516 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
531 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
534 /* unknown operation - assume 3 arguments and cc_op in env */
535 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
541 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
542 tcg_temp_free_i32(local_cc_op
);
544 if (!TCGV_IS_UNUSED_I64(dummy
)) {
545 tcg_temp_free_i64(dummy
);
548 /* We now have cc in cc_op as constant */
/* True when the TB must end with a full exit to the main loop rather
   than chaining: single-stepping, last-I/O instruction handling, or
   PER tracing each force a return to the loop. */
static bool use_exit_tb(DisasContext *s)
{
    return (s->singlestep_enabled ||
            (s->tb->cflags & CF_LAST_IO) ||
            (s->tb->flags & FLAG_MASK_PER));
}
559 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
561 if (unlikely(use_exit_tb(s
))) {
564 #ifndef CONFIG_USER_ONLY
565 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
566 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
/* Debug statistics: count a branch whose condition could NOT be
   inlined for this cc_op.  No-op unless DEBUG_INLINE_BRANCHES. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
/* Debug statistics: count a branch whose condition WAS inlined for
   this cc_op.  No-op unless DEBUG_INLINE_BRANCHES. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask; bit 0 of the mask (the CC=3 column,
   marked "x") is a don't-care, hence the paired duplicate entries. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible; the CC=2 and CC=3
   columns (marked "x") are don't-cares, hence the runs of four
   identical entries. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
612 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
613 details required to generate a TCG comparison. */
614 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
617 enum cc_op old_cc_op
= s
->cc_op
;
619 if (mask
== 15 || mask
== 0) {
620 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
623 c
->g1
= c
->g2
= true;
628 /* Find the TCG condition for the mask + cc op. */
634 cond
= ltgt_cond
[mask
];
635 if (cond
== TCG_COND_NEVER
) {
638 account_inline_branch(s
, old_cc_op
);
641 case CC_OP_LTUGTU_32
:
642 case CC_OP_LTUGTU_64
:
643 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
644 if (cond
== TCG_COND_NEVER
) {
647 account_inline_branch(s
, old_cc_op
);
651 cond
= nz_cond
[mask
];
652 if (cond
== TCG_COND_NEVER
) {
655 account_inline_branch(s
, old_cc_op
);
670 account_inline_branch(s
, old_cc_op
);
685 account_inline_branch(s
, old_cc_op
);
689 switch (mask
& 0xa) {
690 case 8: /* src == 0 -> no one bit found */
693 case 2: /* src != 0 -> one bit found */
699 account_inline_branch(s
, old_cc_op
);
705 case 8 | 2: /* vr == 0 */
708 case 4 | 1: /* vr != 0 */
711 case 8 | 4: /* no carry -> vr >= src */
714 case 2 | 1: /* carry -> vr < src */
720 account_inline_branch(s
, old_cc_op
);
725 /* Note that CC=0 is impossible; treat it as dont-care. */
727 case 2: /* zero -> op1 == op2 */
730 case 4 | 1: /* !zero -> op1 != op2 */
733 case 4: /* borrow (!carry) -> op1 < op2 */
736 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
742 account_inline_branch(s
, old_cc_op
);
747 /* Calculate cc value. */
752 /* Jump based on CC. We'll load up the real cond below;
753 the assignment here merely avoids a compiler warning. */
754 account_noninline_branch(s
, old_cc_op
);
755 old_cc_op
= CC_OP_STATIC
;
756 cond
= TCG_COND_NEVER
;
760 /* Load up the arguments of the comparison. */
762 c
->g1
= c
->g2
= false;
766 c
->u
.s32
.a
= tcg_temp_new_i32();
767 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
768 c
->u
.s32
.b
= tcg_const_i32(0);
771 case CC_OP_LTUGTU_32
:
774 c
->u
.s32
.a
= tcg_temp_new_i32();
775 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
776 c
->u
.s32
.b
= tcg_temp_new_i32();
777 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
784 c
->u
.s64
.b
= tcg_const_i64(0);
788 case CC_OP_LTUGTU_64
:
792 c
->g1
= c
->g2
= true;
798 c
->u
.s64
.a
= tcg_temp_new_i64();
799 c
->u
.s64
.b
= tcg_const_i64(0);
800 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
805 c
->u
.s32
.a
= tcg_temp_new_i32();
806 c
->u
.s32
.b
= tcg_temp_new_i32();
807 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
808 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
809 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
811 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
818 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
819 c
->u
.s64
.b
= tcg_const_i64(0);
831 case 0x8 | 0x4 | 0x2: /* cc != 3 */
833 c
->u
.s32
.b
= tcg_const_i32(3);
835 case 0x8 | 0x4 | 0x1: /* cc != 2 */
837 c
->u
.s32
.b
= tcg_const_i32(2);
839 case 0x8 | 0x2 | 0x1: /* cc != 1 */
841 c
->u
.s32
.b
= tcg_const_i32(1);
case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
846 c
->u
.s32
.a
= tcg_temp_new_i32();
847 c
->u
.s32
.b
= tcg_const_i32(0);
848 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
850 case 0x8 | 0x4: /* cc < 2 */
852 c
->u
.s32
.b
= tcg_const_i32(2);
854 case 0x8: /* cc == 0 */
856 c
->u
.s32
.b
= tcg_const_i32(0);
858 case 0x4 | 0x2 | 0x1: /* cc != 0 */
860 c
->u
.s32
.b
= tcg_const_i32(0);
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
865 c
->u
.s32
.a
= tcg_temp_new_i32();
866 c
->u
.s32
.b
= tcg_const_i32(0);
867 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
869 case 0x4: /* cc == 1 */
871 c
->u
.s32
.b
= tcg_const_i32(1);
873 case 0x2 | 0x1: /* cc > 1 */
875 c
->u
.s32
.b
= tcg_const_i32(1);
877 case 0x2: /* cc == 2 */
879 c
->u
.s32
.b
= tcg_const_i32(2);
881 case 0x1: /* cc == 3 */
883 c
->u
.s32
.b
= tcg_const_i32(3);
886 /* CC is masked by something else: (8 >> cc) & mask. */
889 c
->u
.s32
.a
= tcg_const_i32(8);
890 c
->u
.s32
.b
= tcg_const_i32(0);
891 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
892 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
903 static void free_compare(DisasCompare
*c
)
907 tcg_temp_free_i64(c
->u
.s64
.a
);
909 tcg_temp_free_i32(c
->u
.s32
.a
);
914 tcg_temp_free_i64(c
->u
.s64
.b
);
916 tcg_temp_free_i32(c
->u
.s32
.b
);
921 /* ====================================================================== */
922 /* Define the insn format enumeration. */
923 #define F0(N) FMT_##N,
924 #define F1(N, X1) F0(N)
925 #define F2(N, X1, X2) F0(N)
926 #define F3(N, X1, X2, X3) F0(N)
927 #define F4(N, X1, X2, X3, X4) F0(N)
928 #define F5(N, X1, X2, X3, X4, X5) F0(N)
931 #include "insn-format.def"
941 /* Define a structure to hold the decoded fields. We'll store each inside
942 an array indexed by an enum. In order to conserve memory, we'll arrange
943 for fields that do not exist at the same time to overlap, thus the "C"
944 for compact. For checking purposes there is an "O" for original index
945 as well that will be applied to availability bitmaps. */
947 enum DisasFieldIndexO
{
970 enum DisasFieldIndexC
{
1001 struct DisasFields
{
1005 unsigned presentC
:16;
1006 unsigned int presentO
;
1010 /* This is the way fields are to be accessed out of DisasFields. */
1011 #define have_field(S, F) have_field1((S), FLD_O_##F)
1012 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* Test whether original-index field C was decoded for this insn,
   by probing the presentO availability bitmap. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}
1019 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1020 enum DisasFieldIndexC c
)
1022 assert(have_field1(f
, o
));
1026 /* Describe the layout of each field in each format. */
1027 typedef struct DisasField
{
1029 unsigned int size
:8;
1030 unsigned int type
:2;
1031 unsigned int indexC
:6;
1032 enum DisasFieldIndexO indexO
:8;
1035 typedef struct DisasFormatInfo
{
1036 DisasField op
[NUM_C_FIELD
];
1039 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1040 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1041 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1042 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1043 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1044 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1045 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1046 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1047 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1048 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1049 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1050 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1051 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1052 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1054 #define F0(N) { { } },
1055 #define F1(N, X1) { { X1 } },
1056 #define F2(N, X1, X2) { { X1, X2 } },
1057 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1058 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1059 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1061 static const DisasFormatInfo format_info
[] = {
1062 #include "insn-format.def"
1080 /* Generally, we'll extract operands into this structures, operate upon
1081 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1082 of routines below for more details. */
1084 bool g_out
, g_out2
, g_in1
, g_in2
;
1085 TCGv_i64 out
, out2
, in1
, in2
;
1089 /* Instructions can place constraints on their operands, raising specification
1090 exceptions if they are violated. To make this easy to automate, each "in1",
1091 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1092 of the following, or 0. To make this easy to document, we'll put the
1093 SPEC_<name> defines next to <name>. */
1095 #define SPEC_r1_even 1
1096 #define SPEC_r2_even 2
1097 #define SPEC_r3_even 4
1098 #define SPEC_r1_f128 8
1099 #define SPEC_r2_f128 16
1101 /* Return values from translate_one, indicating the state of the TB. */
1103 /* Continue the TB. */
1105 /* We have emitted one or more goto_tb. No fixup required. */
1107 /* We are not using a goto_tb (for whatever reason), but have updated
1108 the PC (for whatever reason), so there's no need to do it again on
1111 /* We have updated the PC and CC values. */
1113 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1114 updated the PC for the next instruction to be executed. */
1116 /* We are exiting the TB to the main loop. */
1117 EXIT_PC_STALE_NOCHAIN
,
1118 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1119 No following code will be executed. */
1131 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1132 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1133 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1134 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1135 void (*help_cout
)(DisasContext
*, DisasOps
*);
1136 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1141 /* ====================================================================== */
1142 /* Miscellaneous helpers, used by several operations. */
1144 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1145 DisasOps
*o
, int mask
)
1147 int b2
= get_field(f
, b2
);
1148 int d2
= get_field(f
, d2
);
1151 o
->in2
= tcg_const_i64(d2
& mask
);
1153 o
->in2
= get_address(s
, 0, b2
, d2
);
1154 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1158 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1160 if (dest
== s
->next_pc
) {
1161 per_branch(s
, true);
1164 if (use_goto_tb(s
, dest
)) {
1166 per_breaking_event(s
);
1168 tcg_gen_movi_i64(psw_addr
, dest
);
1169 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1170 return EXIT_GOTO_TB
;
1172 tcg_gen_movi_i64(psw_addr
, dest
);
1173 per_branch(s
, false);
1174 return EXIT_PC_UPDATED
;
1178 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1179 bool is_imm
, int imm
, TCGv_i64 cdest
)
1182 uint64_t dest
= s
->pc
+ 2 * imm
;
1185 /* Take care of the special cases first. */
1186 if (c
->cond
== TCG_COND_NEVER
) {
1191 if (dest
== s
->next_pc
) {
1192 /* Branch to next. */
1193 per_branch(s
, true);
1197 if (c
->cond
== TCG_COND_ALWAYS
) {
1198 ret
= help_goto_direct(s
, dest
);
1202 if (TCGV_IS_UNUSED_I64(cdest
)) {
1203 /* E.g. bcr %r0 -> no branch. */
1207 if (c
->cond
== TCG_COND_ALWAYS
) {
1208 tcg_gen_mov_i64(psw_addr
, cdest
);
1209 per_branch(s
, false);
1210 ret
= EXIT_PC_UPDATED
;
1215 if (use_goto_tb(s
, s
->next_pc
)) {
1216 if (is_imm
&& use_goto_tb(s
, dest
)) {
1217 /* Both exits can use goto_tb. */
1220 lab
= gen_new_label();
1222 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1224 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1227 /* Branch not taken. */
1229 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1230 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1234 per_breaking_event(s
);
1236 tcg_gen_movi_i64(psw_addr
, dest
);
1237 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1241 /* Fallthru can use goto_tb, but taken branch cannot. */
1242 /* Store taken branch destination before the brcond. This
1243 avoids having to allocate a new local temp to hold it.
1244 We'll overwrite this in the not taken case anyway. */
1246 tcg_gen_mov_i64(psw_addr
, cdest
);
1249 lab
= gen_new_label();
1251 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1253 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1256 /* Branch not taken. */
1259 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1260 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1264 tcg_gen_movi_i64(psw_addr
, dest
);
1266 per_breaking_event(s
);
1267 ret
= EXIT_PC_UPDATED
;
1270 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1271 Most commonly we're single-stepping or some other condition that
1272 disables all use of goto_tb. Just update the PC and exit. */
1274 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1276 cdest
= tcg_const_i64(dest
);
1280 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1282 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1284 TCGv_i32 t0
= tcg_temp_new_i32();
1285 TCGv_i64 t1
= tcg_temp_new_i64();
1286 TCGv_i64 z
= tcg_const_i64(0);
1287 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1288 tcg_gen_extu_i32_i64(t1
, t0
);
1289 tcg_temp_free_i32(t0
);
1290 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1291 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1292 tcg_temp_free_i64(t1
);
1293 tcg_temp_free_i64(z
);
1297 tcg_temp_free_i64(cdest
);
1299 tcg_temp_free_i64(next
);
1301 ret
= EXIT_PC_UPDATED
;
1309 /* ====================================================================== */
1310 /* The operations. These perform the bulk of the work for any insn,
1311 usually after the operands have been loaded and output initialized. */
1313 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1316 z
= tcg_const_i64(0);
1317 n
= tcg_temp_new_i64();
1318 tcg_gen_neg_i64(n
, o
->in2
);
1319 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1320 tcg_temp_free_i64(n
);
1321 tcg_temp_free_i64(z
);
1325 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1327 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1331 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1333 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1337 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1339 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1340 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1344 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1346 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1350 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1355 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1357 /* The carry flag is the msb of CC, therefore the branch mask that would
1358 create that comparison is 3. Feeding the generated comparison to
1359 setcond produces the carry flag that we desire. */
1360 disas_jcc(s
, &cmp
, 3);
1361 carry
= tcg_temp_new_i64();
1363 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1365 TCGv_i32 t
= tcg_temp_new_i32();
1366 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1367 tcg_gen_extu_i32_i64(carry
, t
);
1368 tcg_temp_free_i32(t
);
1372 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1373 tcg_temp_free_i64(carry
);
1377 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1379 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1383 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1385 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1389 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1391 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1392 return_low128(o
->out2
);
1396 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1398 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1402 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1404 int shift
= s
->insn
->data
& 0xff;
1405 int size
= s
->insn
->data
>> 8;
1406 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1409 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1410 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1411 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1413 /* Produce the CC from only the bits manipulated. */
1414 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1415 set_cc_nz_u64(s
, cc_dst
);
1419 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1421 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1422 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1423 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1424 per_branch(s
, false);
1425 return EXIT_PC_UPDATED
;
1431 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1433 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1434 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1437 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1439 int m1
= get_field(s
->fields
, m1
);
1440 bool is_imm
= have_field(s
->fields
, i2
);
1441 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1444 /* BCR with R2 = 0 causes no branching */
1445 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1447 /* Perform serialization */
1448 /* FIXME: check for fast-BCR-serialization facility */
1449 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1452 /* Perform serialization */
1453 /* FIXME: perform checkpoint-synchronisation */
1454 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1459 disas_jcc(s
, &c
, m1
);
1460 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1463 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1465 int r1
= get_field(s
->fields
, r1
);
1466 bool is_imm
= have_field(s
->fields
, i2
);
1467 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1471 c
.cond
= TCG_COND_NE
;
1476 t
= tcg_temp_new_i64();
1477 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1478 store_reg32_i64(r1
, t
);
1479 c
.u
.s32
.a
= tcg_temp_new_i32();
1480 c
.u
.s32
.b
= tcg_const_i32(0);
1481 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1482 tcg_temp_free_i64(t
);
1484 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1487 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1489 int r1
= get_field(s
->fields
, r1
);
1490 int imm
= get_field(s
->fields
, i2
);
1494 c
.cond
= TCG_COND_NE
;
1499 t
= tcg_temp_new_i64();
1500 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1501 tcg_gen_subi_i64(t
, t
, 1);
1502 store_reg32h_i64(r1
, t
);
1503 c
.u
.s32
.a
= tcg_temp_new_i32();
1504 c
.u
.s32
.b
= tcg_const_i32(0);
1505 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1506 tcg_temp_free_i64(t
);
1508 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1511 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1513 int r1
= get_field(s
->fields
, r1
);
1514 bool is_imm
= have_field(s
->fields
, i2
);
1515 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1518 c
.cond
= TCG_COND_NE
;
1523 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1524 c
.u
.s64
.a
= regs
[r1
];
1525 c
.u
.s64
.b
= tcg_const_i64(0);
1527 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1530 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1532 int r1
= get_field(s
->fields
, r1
);
1533 int r3
= get_field(s
->fields
, r3
);
1534 bool is_imm
= have_field(s
->fields
, i2
);
1535 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1539 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1544 t
= tcg_temp_new_i64();
1545 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1546 c
.u
.s32
.a
= tcg_temp_new_i32();
1547 c
.u
.s32
.b
= tcg_temp_new_i32();
1548 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1549 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1550 store_reg32_i64(r1
, t
);
1551 tcg_temp_free_i64(t
);
1553 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1556 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1558 int r1
= get_field(s
->fields
, r1
);
1559 int r3
= get_field(s
->fields
, r3
);
1560 bool is_imm
= have_field(s
->fields
, i2
);
1561 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1564 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1567 if (r1
== (r3
| 1)) {
1568 c
.u
.s64
.b
= load_reg(r3
| 1);
1571 c
.u
.s64
.b
= regs
[r3
| 1];
1575 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1576 c
.u
.s64
.a
= regs
[r1
];
1579 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1582 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1584 int imm
, m3
= get_field(s
->fields
, m3
);
1588 c
.cond
= ltgt_cond
[m3
];
1589 if (s
->insn
->data
) {
1590 c
.cond
= tcg_unsigned_cond(c
.cond
);
1592 c
.is_64
= c
.g1
= c
.g2
= true;
1596 is_imm
= have_field(s
->fields
, i4
);
1598 imm
= get_field(s
->fields
, i4
);
1601 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1602 get_field(s
->fields
, d4
));
1605 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1608 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1610 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1615 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1617 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1622 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1624 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1629 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1631 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1632 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1633 tcg_temp_free_i32(m3
);
1634 gen_set_cc_nz_f32(s
, o
->in2
);
1638 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1640 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1641 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1642 tcg_temp_free_i32(m3
);
1643 gen_set_cc_nz_f64(s
, o
->in2
);
1647 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1649 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1650 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1651 tcg_temp_free_i32(m3
);
1652 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1656 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1658 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1659 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1660 tcg_temp_free_i32(m3
);
1661 gen_set_cc_nz_f32(s
, o
->in2
);
1665 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1667 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1668 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1669 tcg_temp_free_i32(m3
);
1670 gen_set_cc_nz_f64(s
, o
->in2
);
1674 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1676 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1677 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1678 tcg_temp_free_i32(m3
);
1679 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1683 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1685 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1686 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1687 tcg_temp_free_i32(m3
);
1688 gen_set_cc_nz_f32(s
, o
->in2
);
1692 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1694 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1695 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1696 tcg_temp_free_i32(m3
);
1697 gen_set_cc_nz_f64(s
, o
->in2
);
1701 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1703 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1704 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1705 tcg_temp_free_i32(m3
);
1706 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1710 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1712 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1713 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1714 tcg_temp_free_i32(m3
);
1715 gen_set_cc_nz_f32(s
, o
->in2
);
1719 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1721 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1722 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1723 tcg_temp_free_i32(m3
);
1724 gen_set_cc_nz_f64(s
, o
->in2
);
1728 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1730 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1731 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1732 tcg_temp_free_i32(m3
);
1733 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1737 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1739 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1740 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1741 tcg_temp_free_i32(m3
);
1745 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1747 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1748 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1749 tcg_temp_free_i32(m3
);
1753 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1755 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1756 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1757 tcg_temp_free_i32(m3
);
1758 return_low128(o
->out2
);
1762 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1764 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1765 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1766 tcg_temp_free_i32(m3
);
1770 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1772 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1773 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1774 tcg_temp_free_i32(m3
);
1778 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1780 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1781 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1782 tcg_temp_free_i32(m3
);
1783 return_low128(o
->out2
);
1787 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1789 int r2
= get_field(s
->fields
, r2
);
1790 TCGv_i64 len
= tcg_temp_new_i64();
1792 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1794 return_low128(o
->out
);
1796 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1797 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1798 tcg_temp_free_i64(len
);
1803 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1805 int l
= get_field(s
->fields
, l1
);
1810 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1811 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1814 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1815 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1818 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1819 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1822 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1823 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1826 vl
= tcg_const_i32(l
);
1827 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1828 tcg_temp_free_i32(vl
);
1832 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1836 static ExitStatus
op_clcl(DisasContext
*s
, DisasOps
*o
)
1838 int r1
= get_field(s
->fields
, r1
);
1839 int r2
= get_field(s
->fields
, r2
);
1842 /* r1 and r2 must be even. */
1843 if (r1
& 1 || r2
& 1) {
1844 gen_program_exception(s
, PGM_SPECIFICATION
);
1845 return EXIT_NORETURN
;
1848 t1
= tcg_const_i32(r1
);
1849 t2
= tcg_const_i32(r2
);
1850 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1851 tcg_temp_free_i32(t1
);
1852 tcg_temp_free_i32(t2
);
1857 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1859 int r1
= get_field(s
->fields
, r1
);
1860 int r3
= get_field(s
->fields
, r3
);
1863 /* r1 and r3 must be even. */
1864 if (r1
& 1 || r3
& 1) {
1865 gen_program_exception(s
, PGM_SPECIFICATION
);
1866 return EXIT_NORETURN
;
1869 t1
= tcg_const_i32(r1
);
1870 t3
= tcg_const_i32(r3
);
1871 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1872 tcg_temp_free_i32(t1
);
1873 tcg_temp_free_i32(t3
);
1878 static ExitStatus
op_clclu(DisasContext
*s
, DisasOps
*o
)
1880 int r1
= get_field(s
->fields
, r1
);
1881 int r3
= get_field(s
->fields
, r3
);
1884 /* r1 and r3 must be even. */
1885 if (r1
& 1 || r3
& 1) {
1886 gen_program_exception(s
, PGM_SPECIFICATION
);
1887 return EXIT_NORETURN
;
1890 t1
= tcg_const_i32(r1
);
1891 t3
= tcg_const_i32(r3
);
1892 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1893 tcg_temp_free_i32(t1
);
1894 tcg_temp_free_i32(t3
);
1899 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1901 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1902 TCGv_i32 t1
= tcg_temp_new_i32();
1903 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1904 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1906 tcg_temp_free_i32(t1
);
1907 tcg_temp_free_i32(m3
);
1911 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1913 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1915 return_low128(o
->in2
);
1919 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1921 TCGv_i64 t
= tcg_temp_new_i64();
1922 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1923 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1924 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1925 tcg_temp_free_i64(t
);
1929 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1931 int d2
= get_field(s
->fields
, d2
);
1932 int b2
= get_field(s
->fields
, b2
);
1935 /* Note that in1 = R3 (new value) and
1936 in2 = (zero-extended) R1 (expected value). */
1938 addr
= get_address(s
, 0, b2
, d2
);
1939 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1940 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1941 tcg_temp_free_i64(addr
);
1943 /* Are the memory and expected values (un)equal? Note that this setcond
1944 produces the output CC value, thus the NE sense of the test. */
1945 cc
= tcg_temp_new_i64();
1946 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1947 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1948 tcg_temp_free_i64(cc
);
1954 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1956 int r1
= get_field(s
->fields
, r1
);
1957 int r3
= get_field(s
->fields
, r3
);
1958 int d2
= get_field(s
->fields
, d2
);
1959 int b2
= get_field(s
->fields
, b2
);
1961 TCGv_i32 t_r1
, t_r3
;
1963 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1964 addr
= get_address(s
, 0, b2
, d2
);
1965 t_r1
= tcg_const_i32(r1
);
1966 t_r3
= tcg_const_i32(r3
);
1967 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
1968 tcg_temp_free_i64(addr
);
1969 tcg_temp_free_i32(t_r1
);
1970 tcg_temp_free_i32(t_r3
);
1976 static ExitStatus
op_csst(DisasContext
*s
, DisasOps
*o
)
1978 int r3
= get_field(s
->fields
, r3
);
1979 TCGv_i32 t_r3
= tcg_const_i32(r3
);
1981 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
1982 tcg_temp_free_i32(t_r3
);
1988 #ifndef CONFIG_USER_ONLY
1989 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1991 TCGMemOp mop
= s
->insn
->data
;
1992 TCGv_i64 addr
, old
, cc
;
1993 TCGLabel
*lab
= gen_new_label();
1995 /* Note that in1 = R1 (zero-extended expected value),
1996 out = R1 (original reg), out2 = R1+1 (new value). */
1998 check_privileged(s
);
1999 addr
= tcg_temp_new_i64();
2000 old
= tcg_temp_new_i64();
2001 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2002 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2003 get_mem_index(s
), mop
| MO_ALIGN
);
2004 tcg_temp_free_i64(addr
);
2006 /* Are the memory and expected values (un)equal? */
2007 cc
= tcg_temp_new_i64();
2008 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2009 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2011 /* Write back the output now, so that it happens before the
2012 following branch, so that we don't need local temps. */
2013 if ((mop
& MO_SIZE
) == MO_32
) {
2014 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2016 tcg_gen_mov_i64(o
->out
, old
);
2018 tcg_temp_free_i64(old
);
2020 /* If the comparison was equal, and the LSB of R2 was set,
2021 then we need to flush the TLB (for all cpus). */
2022 tcg_gen_xori_i64(cc
, cc
, 1);
2023 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2024 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2025 tcg_temp_free_i64(cc
);
2027 gen_helper_purge(cpu_env
);
2034 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2036 TCGv_i64 t1
= tcg_temp_new_i64();
2037 TCGv_i32 t2
= tcg_temp_new_i32();
2038 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2039 gen_helper_cvd(t1
, t2
);
2040 tcg_temp_free_i32(t2
);
2041 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2042 tcg_temp_free_i64(t1
);
2046 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2048 int m3
= get_field(s
->fields
, m3
);
2049 TCGLabel
*lab
= gen_new_label();
2052 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2053 if (s
->insn
->data
) {
2054 c
= tcg_unsigned_cond(c
);
2056 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2065 static ExitStatus
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2067 int m3
= get_field(s
->fields
, m3
);
2068 int r1
= get_field(s
->fields
, r1
);
2069 int r2
= get_field(s
->fields
, r2
);
2070 TCGv_i32 tr1
, tr2
, chk
;
2072 /* R1 and R2 must both be even. */
2073 if ((r1
| r2
) & 1) {
2074 gen_program_exception(s
, PGM_SPECIFICATION
);
2075 return EXIT_NORETURN
;
2077 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2081 tr1
= tcg_const_i32(r1
);
2082 tr2
= tcg_const_i32(r2
);
2083 chk
= tcg_const_i32(m3
);
2085 switch (s
->insn
->data
) {
2087 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2090 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2093 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2096 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2099 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2102 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2105 g_assert_not_reached();
2108 tcg_temp_free_i32(tr1
);
2109 tcg_temp_free_i32(tr2
);
2110 tcg_temp_free_i32(chk
);
2115 #ifndef CONFIG_USER_ONLY
2116 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2118 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2119 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2120 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2122 check_privileged(s
);
2126 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2128 tcg_temp_free_i32(func_code
);
2129 tcg_temp_free_i32(r3
);
2130 tcg_temp_free_i32(r1
);
2135 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2137 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2138 return_low128(o
->out
);
2142 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2144 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2145 return_low128(o
->out
);
2149 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2151 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2152 return_low128(o
->out
);
2156 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2158 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2159 return_low128(o
->out
);
2163 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2165 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2169 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2171 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2175 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2177 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2178 return_low128(o
->out2
);
2182 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2184 int r2
= get_field(s
->fields
, r2
);
2185 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2189 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2191 /* No cache information provided. */
2192 tcg_gen_movi_i64(o
->out
, -1);
2196 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2198 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2202 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2204 int r1
= get_field(s
->fields
, r1
);
2205 int r2
= get_field(s
->fields
, r2
);
2206 TCGv_i64 t
= tcg_temp_new_i64();
2208 /* Note the "subsequently" in the PoO, which implies a defined result
2209 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2210 tcg_gen_shri_i64(t
, psw_mask
, 32);
2211 store_reg32_i64(r1
, t
);
2213 store_reg32_i64(r2
, psw_mask
);
2216 tcg_temp_free_i64(t
);
2220 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2222 int r1
= get_field(s
->fields
, r1
);
2226 /* Nested EXECUTE is not allowed. */
2227 if (unlikely(s
->ex_value
)) {
2228 gen_program_exception(s
, PGM_EXECUTE
);
2229 return EXIT_NORETURN
;
2236 v1
= tcg_const_i64(0);
2241 ilen
= tcg_const_i32(s
->ilen
);
2242 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2243 tcg_temp_free_i32(ilen
);
2246 tcg_temp_free_i64(v1
);
2249 return EXIT_PC_CC_UPDATED
;
2252 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2254 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2255 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2256 tcg_temp_free_i32(m3
);
2260 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2262 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2263 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2264 tcg_temp_free_i32(m3
);
2268 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2270 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2271 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2272 return_low128(o
->out2
);
2273 tcg_temp_free_i32(m3
);
2277 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2279 /* We'll use the original input for cc computation, since we get to
2280 compare that against 0, which ought to be better than comparing
2281 the real output against 64. It also lets cc_dst be a convenient
2282 temporary during our computation. */
2283 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2285 /* R1 = IN ? CLZ(IN) : 64. */
2286 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2288 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2289 value by 64, which is undefined. But since the shift is 64 iff the
2290 input is zero, we still get the correct result after and'ing. */
2291 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2292 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2293 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2297 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2299 int m3
= get_field(s
->fields
, m3
);
2300 int pos
, len
, base
= s
->insn
->data
;
2301 TCGv_i64 tmp
= tcg_temp_new_i64();
2306 /* Effectively a 32-bit load. */
2307 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2314 /* Effectively a 16-bit load. */
2315 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2323 /* Effectively an 8-bit load. */
2324 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2329 pos
= base
+ ctz32(m3
) * 8;
2330 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2331 ccm
= ((1ull << len
) - 1) << pos
;
2335 /* This is going to be a sequence of loads and inserts. */
2336 pos
= base
+ 32 - 8;
2340 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2341 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2342 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2345 m3
= (m3
<< 1) & 0xf;
2351 tcg_gen_movi_i64(tmp
, ccm
);
2352 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2353 tcg_temp_free_i64(tmp
);
2357 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2359 int shift
= s
->insn
->data
& 0xff;
2360 int size
= s
->insn
->data
>> 8;
2361 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2365 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2370 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2372 t1
= tcg_temp_new_i64();
2373 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2374 tcg_gen_shri_i64(t1
, t1
, 36);
2375 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2377 tcg_gen_extu_i32_i64(t1
, cc_op
);
2378 tcg_gen_shli_i64(t1
, t1
, 28);
2379 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2380 tcg_temp_free_i64(t1
);
2384 #ifndef CONFIG_USER_ONLY
2385 static ExitStatus
op_idte(DisasContext
*s
, DisasOps
*o
)
2389 check_privileged(s
);
2390 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2391 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2393 m4
= tcg_const_i32(0);
2395 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2396 tcg_temp_free_i32(m4
);
2400 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2404 check_privileged(s
);
2405 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2406 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2408 m4
= tcg_const_i32(0);
2410 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2411 tcg_temp_free_i32(m4
);
2415 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2417 check_privileged(s
);
2418 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2423 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2425 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2430 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2432 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2437 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2439 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2444 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2446 /* The real output is indeed the original value in memory;
2447 recompute the addition for the computation of CC. */
2448 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2449 s
->insn
->data
| MO_ALIGN
);
2450 /* However, we need to recompute the addition for setting CC. */
2451 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2455 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2457 /* The real output is indeed the original value in memory;
2458 recompute the addition for the computation of CC. */
2459 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2460 s
->insn
->data
| MO_ALIGN
);
2461 /* However, we need to recompute the operation for setting CC. */
2462 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2466 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2468 /* The real output is indeed the original value in memory;
2469 recompute the addition for the computation of CC. */
2470 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2471 s
->insn
->data
| MO_ALIGN
);
2472 /* However, we need to recompute the operation for setting CC. */
2473 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2477 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2479 /* The real output is indeed the original value in memory;
2480 recompute the addition for the computation of CC. */
2481 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2482 s
->insn
->data
| MO_ALIGN
);
2483 /* However, we need to recompute the operation for setting CC. */
2484 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2488 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2490 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2494 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2496 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2500 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2502 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2506 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2508 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2512 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2514 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2515 return_low128(o
->out2
);
2519 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2521 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2522 return_low128(o
->out2
);
2526 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2528 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2532 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2534 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2538 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2540 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2544 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2546 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2550 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2552 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2556 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2558 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2562 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2564 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2568 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2570 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2574 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2576 TCGLabel
*lab
= gen_new_label();
2577 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2578 /* The value is stored even in case of trap. */
2579 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2585 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2587 TCGLabel
*lab
= gen_new_label();
2588 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2589 /* The value is stored even in case of trap. */
2590 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2596 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2598 TCGLabel
*lab
= gen_new_label();
2599 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2600 /* The value is stored even in case of trap. */
2601 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2607 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2609 TCGLabel
*lab
= gen_new_label();
2610 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2611 /* The value is stored even in case of trap. */
2612 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2618 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2620 TCGLabel
*lab
= gen_new_label();
2621 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2622 /* The value is stored even in case of trap. */
2623 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2629 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2633 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2636 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2640 TCGv_i32 t32
= tcg_temp_new_i32();
2643 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2646 t
= tcg_temp_new_i64();
2647 tcg_gen_extu_i32_i64(t
, t32
);
2648 tcg_temp_free_i32(t32
);
2650 z
= tcg_const_i64(0);
2651 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2652 tcg_temp_free_i64(t
);
2653 tcg_temp_free_i64(z
);
2659 #ifndef CONFIG_USER_ONLY
2660 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2662 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2663 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2664 check_privileged(s
);
2665 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2666 tcg_temp_free_i32(r1
);
2667 tcg_temp_free_i32(r3
);
2671 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2673 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2674 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2675 check_privileged(s
);
2676 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2677 tcg_temp_free_i32(r1
);
2678 tcg_temp_free_i32(r3
);
2682 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2684 check_privileged(s
);
2685 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2690 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2692 check_privileged(s
);
2694 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2698 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2702 check_privileged(s
);
2703 per_breaking_event(s
);
2705 t1
= tcg_temp_new_i64();
2706 t2
= tcg_temp_new_i64();
2707 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2708 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2709 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2710 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2711 tcg_gen_shli_i64(t1
, t1
, 32);
2712 gen_helper_load_psw(cpu_env
, t1
, t2
);
2713 tcg_temp_free_i64(t1
);
2714 tcg_temp_free_i64(t2
);
2715 return EXIT_NORETURN
;
2718 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2722 check_privileged(s
);
2723 per_breaking_event(s
);
2725 t1
= tcg_temp_new_i64();
2726 t2
= tcg_temp_new_i64();
2727 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2728 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2729 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2730 gen_helper_load_psw(cpu_env
, t1
, t2
);
2731 tcg_temp_free_i64(t1
);
2732 tcg_temp_free_i64(t2
);
2733 return EXIT_NORETURN
;
2737 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2739 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2740 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2741 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2742 tcg_temp_free_i32(r1
);
2743 tcg_temp_free_i32(r3
);
2747 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2749 int r1
= get_field(s
->fields
, r1
);
2750 int r3
= get_field(s
->fields
, r3
);
2753 /* Only one register to read. */
2754 t1
= tcg_temp_new_i64();
2755 if (unlikely(r1
== r3
)) {
2756 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2757 store_reg32_i64(r1
, t1
);
2762 /* First load the values of the first and last registers to trigger
2763 possible page faults. */
2764 t2
= tcg_temp_new_i64();
2765 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2766 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2767 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2768 store_reg32_i64(r1
, t1
);
2769 store_reg32_i64(r3
, t2
);
2771 /* Only two registers to read. */
2772 if (((r1
+ 1) & 15) == r3
) {
2778 /* Then load the remaining registers. Page fault can't occur. */
2780 tcg_gen_movi_i64(t2
, 4);
2783 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2784 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2785 store_reg32_i64(r1
, t1
);
2793 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2795 int r1
= get_field(s
->fields
, r1
);
2796 int r3
= get_field(s
->fields
, r3
);
2799 /* Only one register to read. */
2800 t1
= tcg_temp_new_i64();
2801 if (unlikely(r1
== r3
)) {
2802 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2803 store_reg32h_i64(r1
, t1
);
2808 /* First load the values of the first and last registers to trigger
2809 possible page faults. */
2810 t2
= tcg_temp_new_i64();
2811 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2812 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2813 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2814 store_reg32h_i64(r1
, t1
);
2815 store_reg32h_i64(r3
, t2
);
2817 /* Only two registers to read. */
2818 if (((r1
+ 1) & 15) == r3
) {
2824 /* Then load the remaining registers. Page fault can't occur. */
2826 tcg_gen_movi_i64(t2
, 4);
2829 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2830 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2831 store_reg32h_i64(r1
, t1
);
2839 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2841 int r1
= get_field(s
->fields
, r1
);
2842 int r3
= get_field(s
->fields
, r3
);
2845 /* Only one register to read. */
2846 if (unlikely(r1
== r3
)) {
2847 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2851 /* First load the values of the first and last registers to trigger
2852 possible page faults. */
2853 t1
= tcg_temp_new_i64();
2854 t2
= tcg_temp_new_i64();
2855 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2856 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2857 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2858 tcg_gen_mov_i64(regs
[r1
], t1
);
2861 /* Only two registers to read. */
2862 if (((r1
+ 1) & 15) == r3
) {
2867 /* Then load the remaining registers. Page fault can't occur. */
2869 tcg_gen_movi_i64(t1
, 8);
2872 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2873 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2880 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2883 TCGMemOp mop
= s
->insn
->data
;
2885 /* In a parallel context, stop the world and single step. */
2886 if (parallel_cpus
) {
2887 potential_page_fault(s
);
2888 gen_exception(EXCP_ATOMIC
);
2889 return EXIT_NORETURN
;
2892 /* In a serial context, perform the two loads ... */
2893 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2894 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2895 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2896 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2897 tcg_temp_free_i64(a1
);
2898 tcg_temp_free_i64(a2
);
2900 /* ... and indicate that we performed them while interlocked. */
2901 gen_op_movi_cc(s
, 0);
2905 static ExitStatus
op_lpq(DisasContext
*s
, DisasOps
*o
)
2907 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
2908 return_low128(o
->out2
);
2912 #ifndef CONFIG_USER_ONLY
2913 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2915 check_privileged(s
);
2916 potential_page_fault(s
);
2917 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2921 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2923 check_privileged(s
);
2924 potential_page_fault(s
);
2925 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2930 static ExitStatus
op_lzrb(DisasContext
*s
, DisasOps
*o
)
2932 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
2936 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2939 o
->g_out
= o
->g_in2
;
2940 TCGV_UNUSED_I64(o
->in2
);
2945 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2947 int b2
= get_field(s
->fields
, b2
);
2948 TCGv ar1
= tcg_temp_new_i64();
2951 o
->g_out
= o
->g_in2
;
2952 TCGV_UNUSED_I64(o
->in2
);
2955 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2956 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
2957 tcg_gen_movi_i64(ar1
, 0);
2959 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
2960 tcg_gen_movi_i64(ar1
, 1);
2962 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
2964 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2966 tcg_gen_movi_i64(ar1
, 0);
2969 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
2970 tcg_gen_movi_i64(ar1
, 2);
2974 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2975 tcg_temp_free_i64(ar1
);
2980 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2984 o
->g_out
= o
->g_in1
;
2985 o
->g_out2
= o
->g_in2
;
2986 TCGV_UNUSED_I64(o
->in1
);
2987 TCGV_UNUSED_I64(o
->in2
);
2988 o
->g_in1
= o
->g_in2
= false;
2992 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2994 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2995 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2996 tcg_temp_free_i32(l
);
3000 static ExitStatus
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3002 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3003 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3004 tcg_temp_free_i32(l
);
3008 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3010 int r1
= get_field(s
->fields
, r1
);
3011 int r2
= get_field(s
->fields
, r2
);
3014 /* r1 and r2 must be even. */
3015 if (r1
& 1 || r2
& 1) {
3016 gen_program_exception(s
, PGM_SPECIFICATION
);
3017 return EXIT_NORETURN
;
3020 t1
= tcg_const_i32(r1
);
3021 t2
= tcg_const_i32(r2
);
3022 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3023 tcg_temp_free_i32(t1
);
3024 tcg_temp_free_i32(t2
);
3029 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3031 int r1
= get_field(s
->fields
, r1
);
3032 int r3
= get_field(s
->fields
, r3
);
3035 /* r1 and r3 must be even. */
3036 if (r1
& 1 || r3
& 1) {
3037 gen_program_exception(s
, PGM_SPECIFICATION
);
3038 return EXIT_NORETURN
;
3041 t1
= tcg_const_i32(r1
);
3042 t3
= tcg_const_i32(r3
);
3043 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3044 tcg_temp_free_i32(t1
);
3045 tcg_temp_free_i32(t3
);
3050 static ExitStatus
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3052 int r1
= get_field(s
->fields
, r1
);
3053 int r3
= get_field(s
->fields
, r3
);
3056 /* r1 and r3 must be even. */
3057 if (r1
& 1 || r3
& 1) {
3058 gen_program_exception(s
, PGM_SPECIFICATION
);
3059 return EXIT_NORETURN
;
3062 t1
= tcg_const_i32(r1
);
3063 t3
= tcg_const_i32(r3
);
3064 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3065 tcg_temp_free_i32(t1
);
3066 tcg_temp_free_i32(t3
);
3071 static ExitStatus
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3073 int r3
= get_field(s
->fields
, r3
);
3074 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3079 #ifndef CONFIG_USER_ONLY
3080 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3082 int r1
= get_field(s
->fields
, l1
);
3083 check_privileged(s
);
3084 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3089 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3091 int r1
= get_field(s
->fields
, l1
);
3092 check_privileged(s
);
3093 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3099 static ExitStatus
op_mvn(DisasContext
*s
, DisasOps
*o
)
3101 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3102 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3103 tcg_temp_free_i32(l
);
3107 static ExitStatus
op_mvo(DisasContext
*s
, DisasOps
*o
)
3109 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3110 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3111 tcg_temp_free_i32(l
);
3115 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3117 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3122 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
3124 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3126 return_low128(o
->in2
);
3130 static ExitStatus
op_mvz(DisasContext
*s
, DisasOps
*o
)
3132 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3133 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3134 tcg_temp_free_i32(l
);
3138 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3140 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3144 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3146 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3150 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
3152 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3156 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3158 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3162 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
3164 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3168 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
3170 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3171 return_low128(o
->out2
);
3175 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3177 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3178 return_low128(o
->out2
);
3182 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
3184 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3185 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3186 tcg_temp_free_i64(r3
);
3190 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
3192 int r3
= get_field(s
->fields
, r3
);
3193 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3197 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
3199 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3200 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3201 tcg_temp_free_i64(r3
);
3205 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3207 int r3
= get_field(s
->fields
, r3
);
3208 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3212 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3215 z
= tcg_const_i64(0);
3216 n
= tcg_temp_new_i64();
3217 tcg_gen_neg_i64(n
, o
->in2
);
3218 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3219 tcg_temp_free_i64(n
);
3220 tcg_temp_free_i64(z
);
3224 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3226 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3230 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3232 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3236 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3238 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3239 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3243 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3245 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3246 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3247 tcg_temp_free_i32(l
);
3252 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3254 tcg_gen_neg_i64(o
->out
, o
->in2
);
3258 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3260 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3264 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3266 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3270 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3272 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3273 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3277 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3279 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3280 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3281 tcg_temp_free_i32(l
);
3286 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3288 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3292 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3294 int shift
= s
->insn
->data
& 0xff;
3295 int size
= s
->insn
->data
>> 8;
3296 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3299 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3300 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3302 /* Produce the CC from only the bits manipulated. */
3303 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3304 set_cc_nz_u64(s
, cc_dst
);
3308 static ExitStatus
op_pack(DisasContext
*s
, DisasOps
*o
)
3310 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3311 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3312 tcg_temp_free_i32(l
);
3316 static ExitStatus
op_pka(DisasContext
*s
, DisasOps
*o
)
3318 int l2
= get_field(s
->fields
, l2
) + 1;
3321 /* The length must not exceed 32 bytes. */
3323 gen_program_exception(s
, PGM_SPECIFICATION
);
3324 return EXIT_NORETURN
;
3326 l
= tcg_const_i32(l2
);
3327 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3328 tcg_temp_free_i32(l
);
3332 static ExitStatus
op_pku(DisasContext
*s
, DisasOps
*o
)
3334 int l2
= get_field(s
->fields
, l2
) + 1;
3337 /* The length must be even and should not exceed 64 bytes. */
3338 if ((l2
& 1) || (l2
> 64)) {
3339 gen_program_exception(s
, PGM_SPECIFICATION
);
3340 return EXIT_NORETURN
;
3342 l
= tcg_const_i32(l2
);
3343 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3344 tcg_temp_free_i32(l
);
3348 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3350 gen_helper_popcnt(o
->out
, o
->in2
);
3354 #ifndef CONFIG_USER_ONLY
3355 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3357 check_privileged(s
);
3358 gen_helper_ptlb(cpu_env
);
3363 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3365 int i3
= get_field(s
->fields
, i3
);
3366 int i4
= get_field(s
->fields
, i4
);
3367 int i5
= get_field(s
->fields
, i5
);
3368 int do_zero
= i4
& 0x80;
3369 uint64_t mask
, imask
, pmask
;
3372 /* Adjust the arguments for the specific insn. */
3373 switch (s
->fields
->op2
) {
3374 case 0x55: /* risbg */
3379 case 0x5d: /* risbhg */
3382 pmask
= 0xffffffff00000000ull
;
3384 case 0x51: /* risblg */
3387 pmask
= 0x00000000ffffffffull
;
3393 /* MASK is the set of bits to be inserted from R2.
3394 Take care for I3/I4 wraparound. */
3397 mask
^= pmask
>> i4
>> 1;
3399 mask
|= ~(pmask
>> i4
>> 1);
3403 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3404 insns, we need to keep the other half of the register. */
3405 imask
= ~mask
| ~pmask
;
3407 if (s
->fields
->op2
== 0x55) {
3417 if (s
->fields
->op2
== 0x5d) {
3421 /* In some cases we can implement this with extract. */
3422 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3423 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3427 /* In some cases we can implement this with deposit. */
3428 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3429 /* Note that we rotate the bits to be inserted to the lsb, not to
3430 the position as described in the PoO. */
3431 rot
= (rot
- pos
) & 63;
3436 /* Rotate the input as necessary. */
3437 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3439 /* Insert the selected bits into the output. */
3442 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3444 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3446 } else if (imask
== 0) {
3447 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3449 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3450 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3451 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3456 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3458 int i3
= get_field(s
->fields
, i3
);
3459 int i4
= get_field(s
->fields
, i4
);
3460 int i5
= get_field(s
->fields
, i5
);
3463 /* If this is a test-only form, arrange to discard the result. */
3465 o
->out
= tcg_temp_new_i64();
3473 /* MASK is the set of bits to be operated on from R2.
3474 Take care for I3/I4 wraparound. */
3477 mask
^= ~0ull >> i4
>> 1;
3479 mask
|= ~(~0ull >> i4
>> 1);
3482 /* Rotate the input as necessary. */
3483 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3486 switch (s
->fields
->op2
) {
3487 case 0x55: /* AND */
3488 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3489 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3492 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3493 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3495 case 0x57: /* XOR */
3496 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3497 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3504 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3505 set_cc_nz_u64(s
, cc_dst
);
3509 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3511 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3515 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3517 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3521 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3523 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3527 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3529 TCGv_i32 t1
= tcg_temp_new_i32();
3530 TCGv_i32 t2
= tcg_temp_new_i32();
3531 TCGv_i32 to
= tcg_temp_new_i32();
3532 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3533 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3534 tcg_gen_rotl_i32(to
, t1
, t2
);
3535 tcg_gen_extu_i32_i64(o
->out
, to
);
3536 tcg_temp_free_i32(t1
);
3537 tcg_temp_free_i32(t2
);
3538 tcg_temp_free_i32(to
);
3542 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3544 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3548 #ifndef CONFIG_USER_ONLY
3549 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3551 check_privileged(s
);
3552 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3557 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3559 check_privileged(s
);
3560 gen_helper_sacf(cpu_env
, o
->in2
);
3561 /* Addressing mode has changed, so end the block. */
3562 return EXIT_PC_STALE
;
3566 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3568 int sam
= s
->insn
->data
;
3584 /* Bizarre but true, we check the address of the current insn for the
3585 specification exception, not the next to be executed. Thus the PoO
3586 documents that Bad Things Happen two bytes before the end. */
3587 if (s
->pc
& ~mask
) {
3588 gen_program_exception(s
, PGM_SPECIFICATION
);
3589 return EXIT_NORETURN
;
3593 tsam
= tcg_const_i64(sam
);
3594 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3595 tcg_temp_free_i64(tsam
);
3597 /* Always exit the TB, since we (may have) changed execution mode. */
3598 return EXIT_PC_STALE
;
3601 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3603 int r1
= get_field(s
->fields
, r1
);
3604 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3608 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3610 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3614 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3616 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3620 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3622 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3623 return_low128(o
->out2
);
3627 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3629 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3633 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3635 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3639 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3641 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3642 return_low128(o
->out2
);
3646 #ifndef CONFIG_USER_ONLY
3647 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3649 check_privileged(s
);
3650 potential_page_fault(s
);
3651 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3656 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3658 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3659 check_privileged(s
);
3660 potential_page_fault(s
);
3661 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3663 tcg_temp_free_i32(r1
);
3668 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3675 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3677 /* We want to store when the condition is fulfilled, so branch
3678 out when it's not */
3679 c
.cond
= tcg_invert_cond(c
.cond
);
3681 lab
= gen_new_label();
3683 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3685 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3689 r1
= get_field(s
->fields
, r1
);
3690 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3691 switch (s
->insn
->data
) {
3693 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3696 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3698 case 2: /* STOCFH */
3699 h
= tcg_temp_new_i64();
3700 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3701 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3702 tcg_temp_free_i64(h
);
3705 g_assert_not_reached();
3707 tcg_temp_free_i64(a
);
3713 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3715 uint64_t sign
= 1ull << s
->insn
->data
;
3716 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3717 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3718 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3719 /* The arithmetic left shift is curious in that it does not affect
3720 the sign bit. Copy that over from the source unchanged. */
3721 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3722 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3723 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3727 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3729 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3733 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3735 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3739 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3741 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3745 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3747 gen_helper_sfpc(cpu_env
, o
->in2
);
3751 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3753 gen_helper_sfas(cpu_env
, o
->in2
);
3757 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3759 int b2
= get_field(s
->fields
, b2
);
3760 int d2
= get_field(s
->fields
, d2
);
3761 TCGv_i64 t1
= tcg_temp_new_i64();
3762 TCGv_i64 t2
= tcg_temp_new_i64();
3765 switch (s
->fields
->op2
) {
3766 case 0x99: /* SRNM */
3769 case 0xb8: /* SRNMB */
3772 case 0xb9: /* SRNMT */
3778 mask
= (1 << len
) - 1;
3780 /* Insert the value into the appropriate field of the FPC. */
3782 tcg_gen_movi_i64(t1
, d2
& mask
);
3784 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3785 tcg_gen_andi_i64(t1
, t1
, mask
);
3787 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3788 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3789 tcg_temp_free_i64(t1
);
3791 /* Then install the new FPC to set the rounding mode in fpu_status. */
3792 gen_helper_sfpc(cpu_env
, t2
);
3793 tcg_temp_free_i64(t2
);
3797 #ifndef CONFIG_USER_ONLY
3798 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3800 check_privileged(s
);
3801 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3802 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
3806 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3808 check_privileged(s
);
3809 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3813 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3815 check_privileged(s
);
3816 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3817 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3818 return EXIT_PC_STALE_NOCHAIN
;
3821 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3823 check_privileged(s
);
3824 /* ??? Surely cpu address != cpu number. In any case the previous
3825 version of this stored more than the required half-word, so it
3826 is unlikely this has ever been tested. */
3827 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3831 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3833 gen_helper_stck(o
->out
, cpu_env
);
3834 /* ??? We don't implement clock states. */
3835 gen_op_movi_cc(s
, 0);
3839 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3841 TCGv_i64 c1
= tcg_temp_new_i64();
3842 TCGv_i64 c2
= tcg_temp_new_i64();
3843 gen_helper_stck(c1
, cpu_env
);
3844 /* Shift the 64-bit value into its place as a zero-extended
3845 104-bit value. Note that "bit positions 64-103 are always
3846 non-zero so that they compare differently to STCK"; we set
3847 the least significant bit to 1. */
3848 tcg_gen_shli_i64(c2
, c1
, 56);
3849 tcg_gen_shri_i64(c1
, c1
, 8);
3850 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3851 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3852 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3853 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3854 tcg_temp_free_i64(c1
);
3855 tcg_temp_free_i64(c2
);
3856 /* ??? We don't implement clock states. */
3857 gen_op_movi_cc(s
, 0);
3861 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3863 check_privileged(s
);
3864 gen_helper_sckc(cpu_env
, o
->in2
);
3868 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3870 check_privileged(s
);
3871 gen_helper_stckc(o
->out
, cpu_env
);
3875 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3877 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3878 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3879 check_privileged(s
);
3880 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3881 tcg_temp_free_i32(r1
);
3882 tcg_temp_free_i32(r3
);
3886 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3888 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3889 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3890 check_privileged(s
);
3891 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3892 tcg_temp_free_i32(r1
);
3893 tcg_temp_free_i32(r3
);
3897 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3899 check_privileged(s
);
3900 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
3901 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
3905 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3907 check_privileged(s
);
3908 gen_helper_spt(cpu_env
, o
->in2
);
3912 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3914 check_privileged(s
);
3915 gen_helper_stfl(cpu_env
);
3919 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3921 check_privileged(s
);
3922 gen_helper_stpt(o
->out
, cpu_env
);
3926 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3928 check_privileged(s
);
3929 potential_page_fault(s
);
3930 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3935 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3937 check_privileged(s
);
3938 gen_helper_spx(cpu_env
, o
->in2
);
3942 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3944 check_privileged(s
);
3945 potential_page_fault(s
);
3946 gen_helper_xsch(cpu_env
, regs
[1]);
3951 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3953 check_privileged(s
);
3954 potential_page_fault(s
);
3955 gen_helper_csch(cpu_env
, regs
[1]);
3960 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3962 check_privileged(s
);
3963 potential_page_fault(s
);
3964 gen_helper_hsch(cpu_env
, regs
[1]);
3969 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3971 check_privileged(s
);
3972 potential_page_fault(s
);
3973 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3978 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3980 check_privileged(s
);
3981 potential_page_fault(s
);
3982 gen_helper_rchp(cpu_env
, regs
[1]);
3987 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3989 check_privileged(s
);
3990 potential_page_fault(s
);
3991 gen_helper_rsch(cpu_env
, regs
[1]);
3996 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3998 check_privileged(s
);
3999 potential_page_fault(s
);
4000 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4005 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
4007 check_privileged(s
);
4008 potential_page_fault(s
);
4009 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4014 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
4016 check_privileged(s
);
4017 potential_page_fault(s
);
4018 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4023 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
4025 check_privileged(s
);
4026 potential_page_fault(s
);
4027 gen_helper_chsc(cpu_env
, o
->in2
);
4032 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
4034 check_privileged(s
);
4035 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4036 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4040 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4042 uint64_t i2
= get_field(s
->fields
, i2
);
4045 check_privileged(s
);
4047 /* It is important to do what the instruction name says: STORE THEN.
4048 If we let the output hook perform the store then if we fault and
4049 restart, we'll have the wrong SYSTEM MASK in place. */
4050 t
= tcg_temp_new_i64();
4051 tcg_gen_shri_i64(t
, psw_mask
, 56);
4052 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4053 tcg_temp_free_i64(t
);
4055 if (s
->fields
->op
== 0xac) {
4056 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4057 (i2
<< 56) | 0x00ffffffffffffffull
);
4059 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4062 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4063 return EXIT_PC_STALE_NOCHAIN
;
4066 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
4068 check_privileged(s
);
4069 potential_page_fault(s
);
4070 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4074 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
4076 check_privileged(s
);
4077 potential_page_fault(s
);
4078 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4083 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
4085 potential_page_fault(s
);
4086 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4091 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
4093 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4097 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
4099 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4103 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
4105 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4109 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
4111 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4115 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
4117 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4118 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4119 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4120 tcg_temp_free_i32(r1
);
4121 tcg_temp_free_i32(r3
);
4125 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
4127 int m3
= get_field(s
->fields
, m3
);
4128 int pos
, base
= s
->insn
->data
;
4129 TCGv_i64 tmp
= tcg_temp_new_i64();
4131 pos
= base
+ ctz32(m3
) * 8;
4134 /* Effectively a 32-bit store. */
4135 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4136 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4142 /* Effectively a 16-bit store. */
4143 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4144 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4151 /* Effectively an 8-bit store. */
4152 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4153 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4157 /* This is going to be a sequence of shifts and stores. */
4158 pos
= base
+ 32 - 8;
4161 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4162 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4163 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4165 m3
= (m3
<< 1) & 0xf;
4170 tcg_temp_free_i64(tmp
);
4174 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4176 int r1
= get_field(s
->fields
, r1
);
4177 int r3
= get_field(s
->fields
, r3
);
4178 int size
= s
->insn
->data
;
4179 TCGv_i64 tsize
= tcg_const_i64(size
);
4183 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4185 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4190 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4194 tcg_temp_free_i64(tsize
);
4198 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4200 int r1
= get_field(s
->fields
, r1
);
4201 int r3
= get_field(s
->fields
, r3
);
4202 TCGv_i64 t
= tcg_temp_new_i64();
4203 TCGv_i64 t4
= tcg_const_i64(4);
4204 TCGv_i64 t32
= tcg_const_i64(32);
4207 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4208 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4212 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4216 tcg_temp_free_i64(t
);
4217 tcg_temp_free_i64(t4
);
4218 tcg_temp_free_i64(t32
);
4222 static ExitStatus
op_stpq(DisasContext
*s
, DisasOps
*o
)
4224 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4228 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4230 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4231 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4233 gen_helper_srst(cpu_env
, r1
, r2
);
4235 tcg_temp_free_i32(r1
);
4236 tcg_temp_free_i32(r2
);
4241 static ExitStatus
op_srstu(DisasContext
*s
, DisasOps
*o
)
4243 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4244 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4246 gen_helper_srstu(cpu_env
, r1
, r2
);
4248 tcg_temp_free_i32(r1
);
4249 tcg_temp_free_i32(r2
);
4254 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4256 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4260 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4265 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4267 /* The !borrow flag is the msb of CC. Since we want the inverse of
4268 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4269 disas_jcc(s
, &cmp
, 8 | 4);
4270 borrow
= tcg_temp_new_i64();
4272 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4274 TCGv_i32 t
= tcg_temp_new_i32();
4275 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4276 tcg_gen_extu_i32_i64(borrow
, t
);
4277 tcg_temp_free_i32(t
);
4281 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4282 tcg_temp_free_i64(borrow
);
4286 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4293 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4294 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4295 tcg_temp_free_i32(t
);
4297 t
= tcg_const_i32(s
->ilen
);
4298 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4299 tcg_temp_free_i32(t
);
4301 gen_exception(EXCP_SVC
);
4302 return EXIT_NORETURN
;
4305 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4309 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4310 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4311 gen_op_movi_cc(s
, cc
);
4315 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4317 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4322 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4324 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4329 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4331 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4336 #ifndef CONFIG_USER_ONLY
4338 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4340 check_privileged(s
);
4341 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4346 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4348 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4355 static ExitStatus
op_tp(DisasContext
*s
, DisasOps
*o
)
4357 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4358 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4359 tcg_temp_free_i32(l1
);
4364 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4366 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4367 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4368 tcg_temp_free_i32(l
);
4373 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4375 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4376 return_low128(o
->out2
);
4381 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4383 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4384 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4385 tcg_temp_free_i32(l
);
4390 static ExitStatus
op_trtr(DisasContext
*s
, DisasOps
*o
)
4392 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4393 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4394 tcg_temp_free_i32(l
);
4399 static ExitStatus
op_trXX(DisasContext
*s
, DisasOps
*o
)
4401 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4402 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4403 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4404 TCGv_i32 tst
= tcg_temp_new_i32();
4405 int m3
= get_field(s
->fields
, m3
);
4407 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4411 tcg_gen_movi_i32(tst
, -1);
4413 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4414 if (s
->insn
->opc
& 3) {
4415 tcg_gen_ext8u_i32(tst
, tst
);
4417 tcg_gen_ext16u_i32(tst
, tst
);
4420 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4422 tcg_temp_free_i32(r1
);
4423 tcg_temp_free_i32(r2
);
4424 tcg_temp_free_i32(sizes
);
4425 tcg_temp_free_i32(tst
);
4430 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4432 TCGv_i32 t1
= tcg_const_i32(0xff);
4433 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4434 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4435 tcg_temp_free_i32(t1
);
4440 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4442 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4443 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4444 tcg_temp_free_i32(l
);
4448 static ExitStatus
op_unpka(DisasContext
*s
, DisasOps
*o
)
4450 int l1
= get_field(s
->fields
, l1
) + 1;
4453 /* The length must not exceed 32 bytes. */
4455 gen_program_exception(s
, PGM_SPECIFICATION
);
4456 return EXIT_NORETURN
;
4458 l
= tcg_const_i32(l1
);
4459 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4460 tcg_temp_free_i32(l
);
4465 static ExitStatus
op_unpku(DisasContext
*s
, DisasOps
*o
)
4467 int l1
= get_field(s
->fields
, l1
) + 1;
4470 /* The length must be even and should not exceed 64 bytes. */
4471 if ((l1
& 1) || (l1
> 64)) {
4472 gen_program_exception(s
, PGM_SPECIFICATION
);
4473 return EXIT_NORETURN
;
4475 l
= tcg_const_i32(l1
);
4476 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4477 tcg_temp_free_i32(l
);
4483 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4485 int d1
= get_field(s
->fields
, d1
);
4486 int d2
= get_field(s
->fields
, d2
);
4487 int b1
= get_field(s
->fields
, b1
);
4488 int b2
= get_field(s
->fields
, b2
);
4489 int l
= get_field(s
->fields
, l1
);
4492 o
->addr1
= get_address(s
, 0, b1
, d1
);
4494 /* If the addresses are identical, this is a store/memset of zero. */
4495 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4496 o
->in2
= tcg_const_i64(0);
4500 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4503 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4507 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4510 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4514 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4517 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4521 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4523 gen_op_movi_cc(s
, 0);
4527 /* But in general we'll defer to a helper. */
4528 o
->in2
= get_address(s
, 0, b2
, d2
);
4529 t32
= tcg_const_i32(l
);
4530 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4531 tcg_temp_free_i32(t32
);
4536 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4538 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4542 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4544 int shift
= s
->insn
->data
& 0xff;
4545 int size
= s
->insn
->data
>> 8;
4546 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4549 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4550 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4552 /* Produce the CC from only the bits manipulated. */
4553 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4554 set_cc_nz_u64(s
, cc_dst
);
4558 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4560 o
->out
= tcg_const_i64(0);
4564 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4566 o
->out
= tcg_const_i64(0);
4572 /* ====================================================================== */
4573 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4574 the original inputs), update the various cc data structures in order to
4575 be able to compute the new condition code. */
4577 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4579 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4582 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4584 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4587 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4589 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4592 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4594 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4597 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4599 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4602 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4604 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4607 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4609 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4612 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4614 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4617 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4619 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4622 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4624 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4627 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4629 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4632 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4634 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4637 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4639 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4642 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4644 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4647 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4649 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4652 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4654 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4657 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4659 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4662 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4664 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4667 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4669 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4672 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4674 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4675 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4678 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4680 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4683 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4685 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4688 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4690 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4693 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4695 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4698 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4700 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4703 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4705 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4708 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4710 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4713 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4715 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4718 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4720 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4723 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4725 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4728 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4730 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4733 /* ====================================================================== */
4734 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4735 with the TCG register to which we will write. Used in combination with
4736 the "wout" generators, in some cases we need a new temporary, and in
4737 some cases we can write to a TCG global. */
4739 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4741 o
->out
= tcg_temp_new_i64();
4743 #define SPEC_prep_new 0
4745 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4747 o
->out
= tcg_temp_new_i64();
4748 o
->out2
= tcg_temp_new_i64();
4750 #define SPEC_prep_new_P 0
4752 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4754 o
->out
= regs
[get_field(f
, r1
)];
4757 #define SPEC_prep_r1 0
4759 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4761 int r1
= get_field(f
, r1
);
4763 o
->out2
= regs
[r1
+ 1];
4764 o
->g_out
= o
->g_out2
= true;
4766 #define SPEC_prep_r1_P SPEC_r1_even
4768 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4770 o
->out
= fregs
[get_field(f
, r1
)];
4773 #define SPEC_prep_f1 0
4775 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4777 int r1
= get_field(f
, r1
);
4779 o
->out2
= fregs
[r1
+ 2];
4780 o
->g_out
= o
->g_out2
= true;
4782 #define SPEC_prep_x1 SPEC_r1_f128
4784 /* ====================================================================== */
4785 /* The "Write OUTput" generators. These generally perform some non-trivial
4786 copy of data to TCG globals, or to main memory. The trivial cases are
4787 generally handled by having a "prep" generator install the TCG global
4788 as the destination of the operation. */
4790 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4792 store_reg(get_field(f
, r1
), o
->out
);
4794 #define SPEC_wout_r1 0
4796 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4798 int r1
= get_field(f
, r1
);
4799 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4801 #define SPEC_wout_r1_8 0
4803 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4805 int r1
= get_field(f
, r1
);
4806 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4808 #define SPEC_wout_r1_16 0
4810 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4812 store_reg32_i64(get_field(f
, r1
), o
->out
);
4814 #define SPEC_wout_r1_32 0
4816 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4818 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4820 #define SPEC_wout_r1_32h 0
4822 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4824 int r1
= get_field(f
, r1
);
4825 store_reg32_i64(r1
, o
->out
);
4826 store_reg32_i64(r1
+ 1, o
->out2
);
4828 #define SPEC_wout_r1_P32 SPEC_r1_even
4830 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4832 int r1
= get_field(f
, r1
);
4833 store_reg32_i64(r1
+ 1, o
->out
);
4834 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4835 store_reg32_i64(r1
, o
->out
);
4837 #define SPEC_wout_r1_D32 SPEC_r1_even
4839 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4841 int r3
= get_field(f
, r3
);
4842 store_reg32_i64(r3
, o
->out
);
4843 store_reg32_i64(r3
+ 1, o
->out2
);
4845 #define SPEC_wout_r3_P32 SPEC_r3_even
4847 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4849 int r3
= get_field(f
, r3
);
4850 store_reg(r3
, o
->out
);
4851 store_reg(r3
+ 1, o
->out2
);
4853 #define SPEC_wout_r3_P64 SPEC_r3_even
4855 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4857 store_freg32_i64(get_field(f
, r1
), o
->out
);
4859 #define SPEC_wout_e1 0
4861 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4863 store_freg(get_field(f
, r1
), o
->out
);
4865 #define SPEC_wout_f1 0
4867 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4869 int f1
= get_field(s
->fields
, r1
);
4870 store_freg(f1
, o
->out
);
4871 store_freg(f1
+ 2, o
->out2
);
4873 #define SPEC_wout_x1 SPEC_r1_f128
4875 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4877 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4878 store_reg32_i64(get_field(f
, r1
), o
->out
);
4881 #define SPEC_wout_cond_r1r2_32 0
4883 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4885 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4886 store_freg32_i64(get_field(f
, r1
), o
->out
);
4889 #define SPEC_wout_cond_e1e2 0
4891 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4893 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4895 #define SPEC_wout_m1_8 0
4897 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4899 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4901 #define SPEC_wout_m1_16 0
4903 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4905 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4907 #define SPEC_wout_m1_32 0
4909 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4911 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4913 #define SPEC_wout_m1_64 0
4915 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4917 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4919 #define SPEC_wout_m2_32 0
4921 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4923 store_reg(get_field(f
, r1
), o
->in2
);
4925 #define SPEC_wout_in2_r1 0
4927 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4929 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4931 #define SPEC_wout_in2_r1_32 0
4933 /* ====================================================================== */
4934 /* The "INput 1" generators. These load the first operand to an insn. */
4936 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4938 o
->in1
= load_reg(get_field(f
, r1
));
4940 #define SPEC_in1_r1 0
4942 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4944 o
->in1
= regs
[get_field(f
, r1
)];
4947 #define SPEC_in1_r1_o 0
4949 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4951 o
->in1
= tcg_temp_new_i64();
4952 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4954 #define SPEC_in1_r1_32s 0
4956 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4958 o
->in1
= tcg_temp_new_i64();
4959 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4961 #define SPEC_in1_r1_32u 0
4963 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4965 o
->in1
= tcg_temp_new_i64();
4966 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4968 #define SPEC_in1_r1_sr32 0
4970 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4972 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4974 #define SPEC_in1_r1p1 SPEC_r1_even
4976 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4978 o
->in1
= tcg_temp_new_i64();
4979 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4981 #define SPEC_in1_r1p1_32s SPEC_r1_even
4983 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4985 o
->in1
= tcg_temp_new_i64();
4986 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4988 #define SPEC_in1_r1p1_32u SPEC_r1_even
4990 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4992 int r1
= get_field(f
, r1
);
4993 o
->in1
= tcg_temp_new_i64();
4994 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4996 #define SPEC_in1_r1_D32 SPEC_r1_even
4998 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5000 o
->in1
= load_reg(get_field(f
, r2
));
5002 #define SPEC_in1_r2 0
5004 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5006 o
->in1
= tcg_temp_new_i64();
5007 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5009 #define SPEC_in1_r2_sr32 0
5011 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5013 o
->in1
= load_reg(get_field(f
, r3
));
5015 #define SPEC_in1_r3 0
5017 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5019 o
->in1
= regs
[get_field(f
, r3
)];
5022 #define SPEC_in1_r3_o 0
5024 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5026 o
->in1
= tcg_temp_new_i64();
5027 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5029 #define SPEC_in1_r3_32s 0
5031 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5033 o
->in1
= tcg_temp_new_i64();
5034 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5036 #define SPEC_in1_r3_32u 0
5038 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5040 int r3
= get_field(f
, r3
);
5041 o
->in1
= tcg_temp_new_i64();
5042 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5044 #define SPEC_in1_r3_D32 SPEC_r3_even
5046 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5048 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5050 #define SPEC_in1_e1 0
5052 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5054 o
->in1
= fregs
[get_field(f
, r1
)];
5057 #define SPEC_in1_f1_o 0
5059 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5061 int r1
= get_field(f
, r1
);
5063 o
->out2
= fregs
[r1
+ 2];
5064 o
->g_out
= o
->g_out2
= true;
5066 #define SPEC_in1_x1_o SPEC_r1_f128
5068 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5070 o
->in1
= fregs
[get_field(f
, r3
)];
5073 #define SPEC_in1_f3_o 0
5075 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5077 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5079 #define SPEC_in1_la1 0
5081 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5083 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5084 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5086 #define SPEC_in1_la2 0
5088 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5091 o
->in1
= tcg_temp_new_i64();
5092 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5094 #define SPEC_in1_m1_8u 0
5096 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5099 o
->in1
= tcg_temp_new_i64();
5100 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5102 #define SPEC_in1_m1_16s 0
5104 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5107 o
->in1
= tcg_temp_new_i64();
5108 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5110 #define SPEC_in1_m1_16u 0
5112 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5115 o
->in1
= tcg_temp_new_i64();
5116 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5118 #define SPEC_in1_m1_32s 0
5120 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5123 o
->in1
= tcg_temp_new_i64();
5124 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5126 #define SPEC_in1_m1_32u 0
5128 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5131 o
->in1
= tcg_temp_new_i64();
5132 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5134 #define SPEC_in1_m1_64 0
5136 /* ====================================================================== */
5137 /* The "INput 2" generators. These load the second operand to an insn. */
5139 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5141 o
->in2
= regs
[get_field(f
, r1
)];
5144 #define SPEC_in2_r1_o 0
5146 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5148 o
->in2
= tcg_temp_new_i64();
5149 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5151 #define SPEC_in2_r1_16u 0
5153 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5155 o
->in2
= tcg_temp_new_i64();
5156 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5158 #define SPEC_in2_r1_32u 0
5160 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5162 int r1
= get_field(f
, r1
);
5163 o
->in2
= tcg_temp_new_i64();
5164 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5166 #define SPEC_in2_r1_D32 SPEC_r1_even
5168 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5170 o
->in2
= load_reg(get_field(f
, r2
));
5172 #define SPEC_in2_r2 0
5174 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5176 o
->in2
= regs
[get_field(f
, r2
)];
5179 #define SPEC_in2_r2_o 0
5181 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5183 int r2
= get_field(f
, r2
);
5185 o
->in2
= load_reg(r2
);
5188 #define SPEC_in2_r2_nz 0
5190 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5192 o
->in2
= tcg_temp_new_i64();
5193 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5195 #define SPEC_in2_r2_8s 0
5197 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5199 o
->in2
= tcg_temp_new_i64();
5200 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5202 #define SPEC_in2_r2_8u 0
5204 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5206 o
->in2
= tcg_temp_new_i64();
5207 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5209 #define SPEC_in2_r2_16s 0
5211 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5213 o
->in2
= tcg_temp_new_i64();
5214 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5216 #define SPEC_in2_r2_16u 0
5218 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5220 o
->in2
= load_reg(get_field(f
, r3
));
5222 #define SPEC_in2_r3 0
5224 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5226 o
->in2
= tcg_temp_new_i64();
5227 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5229 #define SPEC_in2_r3_sr32 0
5231 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5233 o
->in2
= tcg_temp_new_i64();
5234 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5236 #define SPEC_in2_r2_32s 0
5238 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5240 o
->in2
= tcg_temp_new_i64();
5241 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5243 #define SPEC_in2_r2_32u 0
5245 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5247 o
->in2
= tcg_temp_new_i64();
5248 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5250 #define SPEC_in2_r2_sr32 0
5252 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5254 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5256 #define SPEC_in2_e2 0
5258 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5260 o
->in2
= fregs
[get_field(f
, r2
)];
5263 #define SPEC_in2_f2_o 0
5265 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5267 int r2
= get_field(f
, r2
);
5269 o
->in2
= fregs
[r2
+ 2];
5270 o
->g_in1
= o
->g_in2
= true;
5272 #define SPEC_in2_x2_o SPEC_r2_f128
5274 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5276 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5278 #define SPEC_in2_ra2 0
5280 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5282 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5283 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5285 #define SPEC_in2_a2 0
5287 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5289 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
5291 #define SPEC_in2_ri2 0
5293 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5295 help_l2_shift(s
, f
, o
, 31);
5297 #define SPEC_in2_sh32 0
5299 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5301 help_l2_shift(s
, f
, o
, 63);
5303 #define SPEC_in2_sh64 0
5305 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5308 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5310 #define SPEC_in2_m2_8u 0
5312 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5315 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5317 #define SPEC_in2_m2_16s 0
5319 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5322 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5324 #define SPEC_in2_m2_16u 0
5326 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5329 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5331 #define SPEC_in2_m2_32s 0
5333 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5336 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5338 #define SPEC_in2_m2_32u 0
5340 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5343 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5345 #define SPEC_in2_m2_64 0
5347 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5350 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5352 #define SPEC_in2_mri2_16u 0
5354 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5357 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5359 #define SPEC_in2_mri2_32s 0
5361 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5364 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5366 #define SPEC_in2_mri2_32u 0
5368 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5371 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5373 #define SPEC_in2_mri2_64 0
5375 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5377 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5379 #define SPEC_in2_i2 0
5381 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5383 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5385 #define SPEC_in2_i2_8u 0
5387 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5389 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5391 #define SPEC_in2_i2_16u 0
5393 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5395 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5397 #define SPEC_in2_i2_32u 0
5399 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5401 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5402 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5404 #define SPEC_in2_i2_16u_shl 0
5406 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5408 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5409 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5411 #define SPEC_in2_i2_32u_shl 0
5413 #ifndef CONFIG_USER_ONLY
5414 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5416 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5418 #define SPEC_in2_insn 0
5421 /* ====================================================================== */
5423 /* Find opc within the table of insns. This is formulated as a switch
5424 statement so that (1) we get compile-time notice of cut-paste errors
5425 for duplicated opcodes, and (2) the compiler generates the binary
5426 search tree, rather than us having to post-process the table. */
5428 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5429 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5431 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5433 enum DisasInsnEnum
{
5434 #include "insn-data.def"
5438 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5442 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5444 .help_in1 = in1_##I1, \
5445 .help_in2 = in2_##I2, \
5446 .help_prep = prep_##P, \
5447 .help_wout = wout_##W, \
5448 .help_cout = cout_##CC, \
5449 .help_op = op_##OP, \
5453 /* Allow 0 to be used for NULL in the table below. */
5461 #define SPEC_in1_0 0
5462 #define SPEC_in2_0 0
5463 #define SPEC_prep_0 0
5464 #define SPEC_wout_0 0
5466 /* Give smaller names to the various facilities. */
5467 #define FAC_Z S390_FEAT_ZARCH
5468 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5469 #define FAC_DFP S390_FEAT_DFP
5470 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5471 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5472 #define FAC_EE S390_FEAT_EXECUTE_EXT
5473 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5474 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5475 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5476 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5477 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5478 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5479 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5480 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5481 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5482 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5483 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5484 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5485 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5486 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5487 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5488 #define FAC_SFLE S390_FEAT_STFLE
5489 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5490 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5491 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5492 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5493 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5494 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5495 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5496 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5497 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5499 static const DisasInsn insn_info
[] = {
5500 #include "insn-data.def"
5504 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5505 case OPC: return &insn_info[insn_ ## NM];
5507 static const DisasInsn
*lookup_opc(uint16_t opc
)
5510 #include "insn-data.def"
5519 /* Extract a field from the insn. The INSN should be left-aligned in
5520 the uint64_t so that we can more easily utilize the big-bit-endian
5521 definitions we extract from the Principals of Operation. */
5523 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5531 /* Zero extract the field from the insn. */
5532 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5534 /* Sign-extend, or un-swap the field as necessary. */
5536 case 0: /* unsigned */
5538 case 1: /* signed */
5539 assert(f
->size
<= 32);
5540 m
= 1u << (f
->size
- 1);
5543 case 2: /* dl+dh split, signed 20 bit. */
5544 r
= ((int8_t)r
<< 12) | (r
>> 8);
5550 /* Validate that the "compressed" encoding we selected above is valid.
5551 I.e. we havn't make two different original fields overlap. */
5552 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5553 o
->presentC
|= 1 << f
->indexC
;
5554 o
->presentO
|= 1 << f
->indexO
;
5556 o
->c
[f
->indexC
] = r
;
5559 /* Lookup the insn at the current PC, extracting the operands into O and
5560 returning the info struct for the insn. Returns NULL for invalid insn. */
5562 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5565 uint64_t insn
, pc
= s
->pc
;
5567 const DisasInsn
*info
;
5569 if (unlikely(s
->ex_value
)) {
5570 /* Drop the EX data now, so that it's clear on exception paths. */
5571 TCGv_i64 zero
= tcg_const_i64(0);
5572 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5573 tcg_temp_free_i64(zero
);
5575 /* Extract the values saved by EXECUTE. */
5576 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5577 ilen
= s
->ex_value
& 0xf;
5580 insn
= ld_code2(env
, pc
);
5581 op
= (insn
>> 8) & 0xff;
5582 ilen
= get_ilen(op
);
5588 insn
= ld_code4(env
, pc
) << 32;
5591 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5594 g_assert_not_reached();
5597 s
->next_pc
= s
->pc
+ ilen
;
5600 /* We can't actually determine the insn format until we've looked up
5601 the full insn opcode. Which we can't do without locating the
5602 secondary opcode. Assume by default that OP2 is at bit 40; for
5603 those smaller insns that don't actually have a secondary opcode
5604 this will correctly result in OP2 = 0. */
5610 case 0xb2: /* S, RRF, RRE, IE */
5611 case 0xb3: /* RRE, RRD, RRF */
5612 case 0xb9: /* RRE, RRF */
5613 case 0xe5: /* SSE, SIL */
5614 op2
= (insn
<< 8) >> 56;
5618 case 0xc0: /* RIL */
5619 case 0xc2: /* RIL */
5620 case 0xc4: /* RIL */
5621 case 0xc6: /* RIL */
5622 case 0xc8: /* SSF */
5623 case 0xcc: /* RIL */
5624 op2
= (insn
<< 12) >> 60;
5626 case 0xc5: /* MII */
5627 case 0xc7: /* SMI */
5628 case 0xd0 ... 0xdf: /* SS */
5634 case 0xee ... 0xf3: /* SS */
5635 case 0xf8 ... 0xfd: /* SS */
5639 op2
= (insn
<< 40) >> 56;
5643 memset(f
, 0, sizeof(*f
));
5648 /* Lookup the instruction. */
5649 info
= lookup_opc(op
<< 8 | op2
);
5651 /* If we found it, extract the operands. */
5653 DisasFormat fmt
= info
->fmt
;
5656 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5657 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5663 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5665 const DisasInsn
*insn
;
5666 ExitStatus ret
= NO_EXIT
;
5670 /* Search for the insn in the table. */
5671 insn
= extract_insn(env
, s
, &f
);
5673 /* Not found means unimplemented/illegal opcode. */
5675 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5677 gen_illegal_opcode(s
);
5678 return EXIT_NORETURN
;
5681 #ifndef CONFIG_USER_ONLY
5682 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5683 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5684 gen_helper_per_ifetch(cpu_env
, addr
);
5685 tcg_temp_free_i64(addr
);
5689 /* Check for insn specification exceptions. */
5691 int spec
= insn
->spec
, excp
= 0, r
;
5693 if (spec
& SPEC_r1_even
) {
5694 r
= get_field(&f
, r1
);
5696 excp
= PGM_SPECIFICATION
;
5699 if (spec
& SPEC_r2_even
) {
5700 r
= get_field(&f
, r2
);
5702 excp
= PGM_SPECIFICATION
;
5705 if (spec
& SPEC_r3_even
) {
5706 r
= get_field(&f
, r3
);
5708 excp
= PGM_SPECIFICATION
;
5711 if (spec
& SPEC_r1_f128
) {
5712 r
= get_field(&f
, r1
);
5714 excp
= PGM_SPECIFICATION
;
5717 if (spec
& SPEC_r2_f128
) {
5718 r
= get_field(&f
, r2
);
5720 excp
= PGM_SPECIFICATION
;
5724 gen_program_exception(s
, excp
);
5725 return EXIT_NORETURN
;
5729 /* Set up the strutures we use to communicate with the helpers. */
5732 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5733 TCGV_UNUSED_I64(o
.out
);
5734 TCGV_UNUSED_I64(o
.out2
);
5735 TCGV_UNUSED_I64(o
.in1
);
5736 TCGV_UNUSED_I64(o
.in2
);
5737 TCGV_UNUSED_I64(o
.addr1
);
5739 /* Implement the instruction. */
5740 if (insn
->help_in1
) {
5741 insn
->help_in1(s
, &f
, &o
);
5743 if (insn
->help_in2
) {
5744 insn
->help_in2(s
, &f
, &o
);
5746 if (insn
->help_prep
) {
5747 insn
->help_prep(s
, &f
, &o
);
5749 if (insn
->help_op
) {
5750 ret
= insn
->help_op(s
, &o
);
5752 if (insn
->help_wout
) {
5753 insn
->help_wout(s
, &f
, &o
);
5755 if (insn
->help_cout
) {
5756 insn
->help_cout(s
, &o
);
5759 /* Free any temporaries created by the helpers. */
5760 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5761 tcg_temp_free_i64(o
.out
);
5763 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5764 tcg_temp_free_i64(o
.out2
);
5766 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5767 tcg_temp_free_i64(o
.in1
);
5769 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5770 tcg_temp_free_i64(o
.in2
);
5772 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5773 tcg_temp_free_i64(o
.addr1
);
5776 #ifndef CONFIG_USER_ONLY
5777 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5778 /* An exception might be triggered, save PSW if not already done. */
5779 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5780 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5786 /* Call the helper to check for a possible PER exception. */
5787 gen_helper_per_check_exception(cpu_env
);
5791 /* Advance to the next instruction. */
5796 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
5798 CPUS390XState
*env
= cs
->env_ptr
;
5800 target_ulong pc_start
;
5801 uint64_t next_page_start
;
5802 int num_insns
, max_insns
;
5809 if (!(tb
->flags
& FLAG_MASK_64
)) {
5810 pc_start
&= 0x7fffffff;
5815 dc
.cc_op
= CC_OP_DYNAMIC
;
5816 dc
.ex_value
= tb
->cs_base
;
5817 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5819 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5822 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5823 if (max_insns
== 0) {
5824 max_insns
= CF_COUNT_MASK
;
5826 if (max_insns
> TCG_MAX_INSNS
) {
5827 max_insns
= TCG_MAX_INSNS
;
5833 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5836 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5837 status
= EXIT_PC_STALE
;
5839 /* The address covered by the breakpoint must be included in
5840 [tb->pc, tb->pc + tb->size) in order to for it to be
5841 properly cleared -- thus we increment the PC here so that
5842 the logic setting tb->size below does the right thing. */
5847 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5851 status
= translate_one(env
, &dc
);
5853 /* If we reach a page boundary, are single stepping,
5854 or exhaust instruction count, stop generation. */
5855 if (status
== NO_EXIT
5856 && (dc
.pc
>= next_page_start
5857 || tcg_op_buf_full()
5858 || num_insns
>= max_insns
5860 || cs
->singlestep_enabled
5862 status
= EXIT_PC_STALE
;
5864 } while (status
== NO_EXIT
);
5866 if (tb
->cflags
& CF_LAST_IO
) {
5875 case EXIT_PC_STALE_NOCHAIN
:
5876 update_psw_addr(&dc
);
5878 case EXIT_PC_UPDATED
:
5879 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5880 cc op type is in env */
5883 case EXIT_PC_CC_UPDATED
:
5884 /* Exit the TB, either by raising a debug exception or by return. */
5886 gen_exception(EXCP_DEBUG
);
5887 } else if (use_exit_tb(&dc
) || status
== EXIT_PC_STALE_NOCHAIN
) {
5890 tcg_gen_lookup_and_goto_ptr(psw_addr
);
5894 g_assert_not_reached();
5897 gen_tb_end(tb
, num_insns
);
5899 tb
->size
= dc
.pc
- pc_start
;
5900 tb
->icount
= num_insns
;
5902 #if defined(S390X_DEBUG_DISAS)
5903 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5904 && qemu_log_in_addr_range(pc_start
)) {
5906 if (unlikely(dc
.ex_value
)) {
5907 /* ??? Unfortunately log_target_disas can't use host memory. */
5908 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
5910 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5911 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5919 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5922 int cc_op
= data
[1];
5923 env
->psw
.addr
= data
[0];
5924 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {