target/s390x: Improve general case of disas_jcc
target/s390x/tcg/translate.c

/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields. We'll store each inside
 * an array indexed by an enum. In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact. For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
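
/*
 * Illustrative note: fields that share a compact slot can never occur in
 * the same instruction format, e.g. FLD_O_r1 and FLD_O_m1 both map to
 * slot 0 (FLD_C_r1 == FLD_C_m1 == 0), and have_field() tells them apart
 * via the original "O" index recorded in presentO.
 */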

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
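
/*
 * Worked example (illustrative): in 31-bit mode (FLAG_MASK_32 set,
 * FLAG_MASK_64 clear) with pc == 0x1000, the value deposited into the
 * low 32 bits of OUT is 0x80001000, i.e. the return address with the
 * addressing-mode bit set, as architected for link registers.
 */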

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16-byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was. */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed. If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
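
/*
 * Example (illustrative): with base b2, index x2 and displacement d2 in
 * 24-bit addressing mode, the computed address is
 * (regs[b2] + regs[x2] + d2) & 0x00ffffff; gen_addi_and_wrap_i64()
 * applies the architected address-size wrap after the addition.
 */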

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
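
/*
 * Illustrative note: for the 3-argument ops, e.g. CC_OP_ADD_64, the
 * helper is assumed to see the two operands in cc_src/cc_dst and the
 * result in cc_vr, from which it can rederive overflow and sign; for
 * CC_OP_DYNAMIC the operation is only known at run time, so the cc_op
 * global itself is passed in place of a constant.
 */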

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
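
/*
 * Example (illustrative): a branch mask of 0xa (CC 0 or CC 2, i.e.
 * "equal or high") indexes ltgt_cond[0xa] == TCG_COND_GE. Entries come
 * in pairs because bit 0 of the mask selects CC 3, which a comparison
 * never produces, so it does not affect the chosen condition.
 */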

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;

    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;

        /* Fold half of the cases using bit 3 to invert. */
        switch (mask & 8 ? mask ^ 0xf : mask) {
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_TSTNE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
            cond = TCG_COND_LEU;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(1);
            tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        default:
            /* case 0: never, handled above. */
            g_assert_not_reached();
        }
        if (mask & 8) {
            cond = tcg_invert_cond(cond);
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
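
/*
 * Illustrative example of the CC_OP_STATIC folding above: mask 0xc
 * (CC 0 or 1) has bit 3 set, so it is rewritten as mask 0x3 (CC 2 or 3,
 * i.e. cc_op > 1, TCG_COND_GTU) and the condition is then inverted to
 * TCG_COND_LEU, a single unsigned comparison instead of two equality
 * tests against the computed CC.
 */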

/* ====================================================================== */
/* Define the insn format enumeration. */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back. See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated. To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0. To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB. */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB. */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values. */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP. */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself. */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next. */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch. */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond. This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb. Just update the PC and exit. */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
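
/*
 * Summary (illustrative): the three cases above emit, in decreasing
 * order of preference, (1) a brcond with goto_tb on both exits, (2) a
 * brcond with a goto_tb fallthru and an indirect taken path, or (3) a
 * branch-free movcond on psw_addr when goto_tb cannot be used at all.
 */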

/* ====================================================================== */
/* The operations. These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized. */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
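
/*
 * Illustrative note: CC_OP_SUBU keeps the borrow in cc_src as 0 (no
 * borrow) or -1 (borrow), so the addi above turns it into the 1/0
 * carry-in that the add-with-carry ops expect. For a static CC the
 * carry is bit 1 of the CC value (CC 2 and 3 mean carry), hence the
 * extend-and-shift-right by one.
 */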

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                           \
    if (have_field(s, ri)) {                                                  \
        if (unlikely(s->ex_value)) {                                          \
            cdest = tcg_temp_new_i64();                                       \
            tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);    \
            is_imm = false;                                                   \
        } else {                                                              \
            is_imm = true;                                                    \
        }                                                                     \
    } else {                                                                  \
        is_imm = false;                                                       \
    }                                                                         \
    imm = is_imm ? get_field(s, ri) : 0;                                      \
} while (false)
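
/*
 * Usage sketch (illustrative): for a plain relative branch such as
 * "brc 8,label", the i2 field is present and s->ex_value is zero, so
 * is_imm becomes true and imm holds the halfword offset; help_branch()
 * then forms the target as s->base.pc_next + imm * 2. Under EXECUTE the
 * target is instead computed from ex_target at run time.
 */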

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}

static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
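
/*
 * Illustrative note: the returned constant packs m3 into bits 0-3 and
 * m4 into bits 4-7, e.g. m3 == 5 (round toward zero) with m4 == 0
 * yields 0x05; the FP helpers split the two nibbles back apart.
 */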

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}

static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_clcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    return DISAS_NEXT;
}

static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value). */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal? Note that this setcond
       produces the output CC value, thus the NE sense of the test. */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}

static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value. */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
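
/*
 * Illustrative note: (old ^ expected) for each half is OR-ed into
 * cc_dst above, so cc_dst is zero exactly when the compare succeeded;
 * CC_OP_NZ then yields CC 0 (swap performed) or CC 1 (mismatch).
 */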

static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value). */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal? */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps. */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus). */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
#endif

static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}

static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even. */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(tcg_env, r1, r3, func_code);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2354 tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
2355 return DISAS_NEXT;
2358 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2360 int r1 = get_field(s, r1);
2361 int r2 = get_field(s, r2);
2362 TCGv_i64 t = tcg_temp_new_i64();
2363 TCGv_i64 t_cc = tcg_temp_new_i64();
2365 /* Note the "subsequently" in the PoO, which implies a defined result
2366 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2367 gen_op_calc_cc(s);
2368 tcg_gen_extu_i32_i64(t_cc, cc_op);
2369 tcg_gen_shri_i64(t, psw_mask, 32);
2370 tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
2371 store_reg32_i64(r1, t);
2372 if (r2 != 0) {
2373 store_reg32_i64(r2, psw_mask);
2375 return DISAS_NEXT;
2378 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2380 int r1 = get_field(s, r1);
2381 TCGv_i32 ilen;
2382 TCGv_i64 v1;
2384 /* Nested EXECUTE is not allowed. */
2385 if (unlikely(s->ex_value)) {
2386 gen_program_exception(s, PGM_EXECUTE);
2387 return DISAS_NORETURN;
2390 update_psw_addr(s);
2391 update_cc_op(s);
2393 if (r1 == 0) {
2394 v1 = tcg_constant_i64(0);
2395 } else {
2396 v1 = regs[r1];
2399 ilen = tcg_constant_i32(s->ilen);
2400 gen_helper_ex(tcg_env, ilen, v1, o->in2);
2402 return DISAS_PC_CC_UPDATED;
2405 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2407 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2409 if (!m34) {
2410 return DISAS_NORETURN;
2412 gen_helper_fieb(o->out, tcg_env, o->in2, m34);
2413 return DISAS_NEXT;
2416 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2418 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2420 if (!m34) {
2421 return DISAS_NORETURN;
2423 gen_helper_fidb(o->out, tcg_env, o->in2, m34);
2424 return DISAS_NEXT;
2427 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2429 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2431 if (!m34) {
2432 return DISAS_NORETURN;
2434 gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
2435 return DISAS_NEXT;
2438 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2440 /* We'll use the original input for cc computation, since we get to
2441 compare that against 0, which ought to be better than comparing
2442 the real output against 64. It also lets cc_dst be a convenient
2443 temporary during our computation. */
2444 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2446 /* R1 = IN ? CLZ(IN) : 64. */
2447 tcg_gen_clzi_i64(o->out, o->in2, 64);
2449 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2450 value by 64, which is undefined. But since the shift is 64 iff the
2451 input is zero, we still get the correct result after and'ing. */
2452 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2453 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2454 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
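/* Worked example: for IN = 0xf0, CLZ = 56, so R1 = 56 and
   R1+1 = 0xf0 & ~(0x8000000000000000 >> 56) = 0xf0 & ~0x80 = 0x70,
   i.e. the input with its leftmost one bit cleared.  For IN = 0 the
   shift count is 64 and o->out2 is undefined before the ANDC, but
   cc_dst = 0 forces R1+1 = 0 regardless. */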
2455 return DISAS_NEXT;
2458 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2460 int m3 = get_field(s, m3);
2461 int pos, len, base = s->insn->data;
2462 TCGv_i64 tmp = tcg_temp_new_i64();
2463 uint64_t ccm;
2465 switch (m3) {
2466 case 0xf:
2467 /* Effectively a 32-bit load. */
2468 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2469 len = 32;
2470 goto one_insert;
2472 case 0xc:
2473 case 0x6:
2474 case 0x3:
2475 /* Effectively a 16-bit load. */
2476 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2477 len = 16;
2478 goto one_insert;
2480 case 0x8:
2481 case 0x4:
2482 case 0x2:
2483 case 0x1:
2484 /* Effectively an 8-bit load. */
2485 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2486 len = 8;
2487 goto one_insert;
2489 one_insert:
2490 pos = base + ctz32(m3) * 8;
2491 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2492 ccm = ((1ull << len) - 1) << pos;
2493 break;
2495 case 0:
2496 /* Recognize access exceptions for the first byte. */
2497 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2498 gen_op_movi_cc(s, 0);
2499 return DISAS_NEXT;
2501 default:
2502 /* This is going to be a sequence of loads and inserts. */
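/* E.g. ICM with m3 = 0b1010 loads two consecutive bytes into bits
   31-24 and 15-8 of the destination, and ccm ends up 0xff00ff00 so
   that only the replaced bytes take part in the CC computation. */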
2503 pos = base + 32 - 8;
2504 ccm = 0;
2505 while (m3) {
2506 if (m3 & 0x8) {
2507 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2508 tcg_gen_addi_i64(o->in2, o->in2, 1);
2509 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2510 ccm |= 0xffull << pos;
2512 m3 = (m3 << 1) & 0xf;
2513 pos -= 8;
2515 break;
2518 tcg_gen_movi_i64(tmp, ccm);
2519 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2520 return DISAS_NEXT;
2523 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2525 int shift = s->insn->data & 0xff;
2526 int size = s->insn->data >> 8;
2527 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2528 return DISAS_NEXT;
2531 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2533 TCGv_i64 t1, t2;
2535 gen_op_calc_cc(s);
2536 t1 = tcg_temp_new_i64();
2537 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2538 t2 = tcg_temp_new_i64();
2539 tcg_gen_extu_i32_i64(t2, cc_op);
2540 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2541 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
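/* The deposited byte is 00:cc:pm -- bits 32-33 of R1 zero, the CC in
   bits 34-35 and the program mask in bits 36-39, as the PoO specifies
   for INSERT PROGRAM MASK. */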
2542 return DISAS_NEXT;
2545 #ifndef CONFIG_USER_ONLY
2546 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2548 TCGv_i32 m4;
2550 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2551 m4 = tcg_constant_i32(get_field(s, m4));
2552 } else {
2553 m4 = tcg_constant_i32(0);
2555 gen_helper_idte(tcg_env, o->in1, o->in2, m4);
2556 return DISAS_NEXT;
2559 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2561 TCGv_i32 m4;
2563 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2564 m4 = tcg_constant_i32(get_field(s, m4));
2565 } else {
2566 m4 = tcg_constant_i32(0);
2568 gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
2569 return DISAS_NEXT;
2572 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2574 gen_helper_iske(o->out, tcg_env, o->in2);
2575 return DISAS_NEXT;
2577 #endif
2579 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2581 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2582 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2583 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2584 TCGv_i32 t_r1, t_r2, t_r3, type;
2586 switch (s->insn->data) {
2587 case S390_FEAT_TYPE_KMA:
2588 if (r3 == r1 || r3 == r2) {
2589 gen_program_exception(s, PGM_SPECIFICATION);
2590 return DISAS_NORETURN;
2592 /* FALL THROUGH */
2593 case S390_FEAT_TYPE_KMCTR:
2594 if (r3 & 1 || !r3) {
2595 gen_program_exception(s, PGM_SPECIFICATION);
2596 return DISAS_NORETURN;
2598 /* FALL THROUGH */
2599 case S390_FEAT_TYPE_PPNO:
2600 case S390_FEAT_TYPE_KMF:
2601 case S390_FEAT_TYPE_KMC:
2602 case S390_FEAT_TYPE_KMO:
2603 case S390_FEAT_TYPE_KM:
2604 if (r1 & 1 || !r1) {
2605 gen_program_exception(s, PGM_SPECIFICATION);
2606 return DISAS_NORETURN;
2608 /* FALL THROUGH */
2609 case S390_FEAT_TYPE_KMAC:
2610 case S390_FEAT_TYPE_KIMD:
2611 case S390_FEAT_TYPE_KLMD:
2612 if (r2 & 1 || !r2) {
2613 gen_program_exception(s, PGM_SPECIFICATION);
2614 return DISAS_NORETURN;
2616 /* FALL THROUGH */
2617 case S390_FEAT_TYPE_PCKMO:
2618 case S390_FEAT_TYPE_PCC:
2619 break;
2620 default:
2621 g_assert_not_reached();
2624 t_r1 = tcg_constant_i32(r1);
2625 t_r2 = tcg_constant_i32(r2);
2626 t_r3 = tcg_constant_i32(r3);
2627 type = tcg_constant_i32(s->insn->data);
2628 gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2629 set_cc_static(s);
2630 return DISAS_NEXT;
2633 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2635 gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
2636 set_cc_static(s);
2637 return DISAS_NEXT;
2640 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2642 gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
2643 set_cc_static(s);
2644 return DISAS_NEXT;
2647 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2649 gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
2650 set_cc_static(s);
2651 return DISAS_NEXT;
2654 static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
2656 /* The real output is indeed the original value in memory. */
2658 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2659 s->insn->data | MO_ALIGN);
2660 /* However, we need to recompute the addition for setting CC. */
2661 if (addu64) {
2662 tcg_gen_movi_i64(cc_src, 0);
2663 tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
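/* With the high parts zeroed, add2 leaves the 64-bit sum in o->out and
   the carry-out in cc_src, where the logical-add CC computation
   expects to find it. */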
2664 } else {
2665 tcg_gen_add_i64(o->out, o->in1, o->in2);
2667 return DISAS_NEXT;
2670 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2672 return help_laa(s, o, false);
2675 static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
2677 return help_laa(s, o, true);
2680 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2682 /* The real output is indeed the original value in memory. */
2684 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2685 s->insn->data | MO_ALIGN);
2686 /* However, we need to recompute the operation for setting CC. */
2687 tcg_gen_and_i64(o->out, o->in1, o->in2);
2688 return DISAS_NEXT;
2691 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2693 /* The real output is indeed the original value in memory. */
2695 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2696 s->insn->data | MO_ALIGN);
2697 /* However, we need to recompute the operation for setting CC. */
2698 tcg_gen_or_i64(o->out, o->in1, o->in2);
2699 return DISAS_NEXT;
2702 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2704 /* The real output is indeed the original value in memory. */
2706 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2707 s->insn->data | MO_ALIGN);
2708 /* However, we need to recompute the operation for setting CC. */
2709 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2710 return DISAS_NEXT;
2713 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2715 gen_helper_ldeb(o->out, tcg_env, o->in2);
2716 return DISAS_NEXT;
2719 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2721 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2723 if (!m34) {
2724 return DISAS_NORETURN;
2726 gen_helper_ledb(o->out, tcg_env, o->in2, m34);
2727 return DISAS_NEXT;
2730 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2732 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2734 if (!m34) {
2735 return DISAS_NORETURN;
2737 gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
2738 return DISAS_NEXT;
2741 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2743 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2745 if (!m34) {
2746 return DISAS_NORETURN;
2748 gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
2749 return DISAS_NEXT;
2752 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2754 gen_helper_lxdb(o->out_128, tcg_env, o->in2);
2755 return DISAS_NEXT;
2758 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2760 gen_helper_lxeb(o->out_128, tcg_env, o->in2);
2761 return DISAS_NEXT;
2764 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2766 tcg_gen_shli_i64(o->out, o->in2, 32);
2767 return DISAS_NEXT;
2770 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2772 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2773 return DISAS_NEXT;
2776 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2778 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2779 return DISAS_NEXT;
2782 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2784 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2785 return DISAS_NEXT;
2788 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2790 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2791 return DISAS_NEXT;
2794 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2796 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2797 return DISAS_NEXT;
2800 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2802 tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2803 MO_TESL | s->insn->data);
2804 return DISAS_NEXT;
2807 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2809 tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2810 MO_TEUL | s->insn->data);
2811 return DISAS_NEXT;
2814 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2816 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2817 MO_TEUQ | s->insn->data);
2818 return DISAS_NEXT;
2821 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2823 TCGLabel *lab = gen_new_label();
2824 store_reg32_i64(get_field(s, r1), o->in2);
2825 /* The value is stored even in case of trap. */
2826 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2827 gen_trap(s);
2828 gen_set_label(lab);
2829 return DISAS_NEXT;
2832 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2834 TCGLabel *lab = gen_new_label();
2835 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2836 /* The value is stored even in case of trap. */
2837 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2838 gen_trap(s);
2839 gen_set_label(lab);
2840 return DISAS_NEXT;
2843 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2845 TCGLabel *lab = gen_new_label();
2846 store_reg32h_i64(get_field(s, r1), o->in2);
2847 /* The value is stored even in case of trap. */
2848 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2849 gen_trap(s);
2850 gen_set_label(lab);
2851 return DISAS_NEXT;
2854 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2856 TCGLabel *lab = gen_new_label();
2858 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2859 /* The value is stored even in case of trap. */
2860 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2861 gen_trap(s);
2862 gen_set_label(lab);
2863 return DISAS_NEXT;
2866 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2868 TCGLabel *lab = gen_new_label();
2869 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2870 /* The value is stored even in case of trap. */
2871 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2872 gen_trap(s);
2873 gen_set_label(lab);
2874 return DISAS_NEXT;
2877 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2879 DisasCompare c;
2881 if (have_field(s, m3)) {
2882 /* LOAD * ON CONDITION */
2883 disas_jcc(s, &c, get_field(s, m3));
2884 } else {
2885 /* SELECT */
2886 disas_jcc(s, &c, get_field(s, m4));
2889 if (c.is_64) {
2890 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2891 o->in2, o->in1);
2892 } else {
2893 TCGv_i32 t32 = tcg_temp_new_i32();
2894 TCGv_i64 t, z;
2896 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2898 t = tcg_temp_new_i64();
2899 tcg_gen_extu_i32_i64(t, t32);
2901 z = tcg_constant_i64(0);
2902 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
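/* The 32-bit comparison cannot feed a 64-bit movcond directly, so we
   materialize the condition as a 0/1 value, widen it, and select on
   t != 0 instead. */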
2905 return DISAS_NEXT;
2908 #ifndef CONFIG_USER_ONLY
2909 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2911 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2912 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2914 gen_helper_lctl(tcg_env, r1, o->in2, r3);
2915 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2916 s->exit_to_mainloop = true;
2917 return DISAS_TOO_MANY;
2920 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2922 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2923 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2925 gen_helper_lctlg(tcg_env, r1, o->in2, r3);
2926 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2927 s->exit_to_mainloop = true;
2928 return DISAS_TOO_MANY;
2931 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2933 gen_helper_lra(o->out, tcg_env, o->out, o->in2);
2934 set_cc_static(s);
2935 return DISAS_NEXT;
2938 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2940 tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
2941 return DISAS_NEXT;
2944 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2946 TCGv_i64 mask, addr;
2948 per_breaking_event(s);
2951 * Convert the short PSW into the normal PSW, similar to what
2952 * s390_cpu_load_normal() does.
2954 mask = tcg_temp_new_i64();
2955 addr = tcg_temp_new_i64();
2956 tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2957 tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2958 tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2959 tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2960 gen_helper_load_psw(tcg_env, mask, addr);
2961 return DISAS_NORETURN;
2964 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2966 TCGv_i64 t1, t2;
2968 per_breaking_event(s);
2970 t1 = tcg_temp_new_i64();
2971 t2 = tcg_temp_new_i64();
2972 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2973 MO_TEUQ | MO_ALIGN_8);
2974 tcg_gen_addi_i64(o->in2, o->in2, 8);
2975 tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
2976 gen_helper_load_psw(tcg_env, t1, t2);
2977 return DISAS_NORETURN;
2979 #endif
2981 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2983 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2984 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2986 gen_helper_lam(tcg_env, r1, o->in2, r3);
2987 return DISAS_NEXT;
2990 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2992 int r1 = get_field(s, r1);
2993 int r3 = get_field(s, r3);
2994 TCGv_i64 t1, t2;
2996 /* Only one register to read. */
2997 t1 = tcg_temp_new_i64();
2998 if (unlikely(r1 == r3)) {
2999 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3000 store_reg32_i64(r1, t1);
3001 return DISAS_NEXT;
3004 /* First load the values of the first and last registers to trigger
3005 possible page faults. */
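/* The operand spans at most 16 * 4 = 64 bytes, hence at most two
   pages; once the first and last words have been touched, the
   remaining loads cannot fault.  The same argument applies to
   op_lmh and op_lm64 below. */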
3006 t2 = tcg_temp_new_i64();
3007 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3008 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3009 tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3010 store_reg32_i64(r1, t1);
3011 store_reg32_i64(r3, t2);
3013 /* Only two registers to read. */
3014 if (((r1 + 1) & 15) == r3) {
3015 return DISAS_NEXT;
3018 /* Then load the remaining registers. Page fault can't occur. */
3019 r3 = (r3 - 1) & 15;
3020 tcg_gen_movi_i64(t2, 4);
3021 while (r1 != r3) {
3022 r1 = (r1 + 1) & 15;
3023 tcg_gen_add_i64(o->in2, o->in2, t2);
3024 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3025 store_reg32_i64(r1, t1);
3027 return DISAS_NEXT;
3030 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3032 int r1 = get_field(s, r1);
3033 int r3 = get_field(s, r3);
3034 TCGv_i64 t1, t2;
3036 /* Only one register to read. */
3037 t1 = tcg_temp_new_i64();
3038 if (unlikely(r1 == r3)) {
3039 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3040 store_reg32h_i64(r1, t1);
3041 return DISAS_NEXT;
3044 /* First load the values of the first and last registers to trigger
3045 possible page faults. */
3046 t2 = tcg_temp_new_i64();
3047 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3048 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3049 tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3050 store_reg32h_i64(r1, t1);
3051 store_reg32h_i64(r3, t2);
3053 /* Only two registers to read. */
3054 if (((r1 + 1) & 15) == r3) {
3055 return DISAS_NEXT;
3058 /* Then load the remaining registers. Page fault can't occur. */
3059 r3 = (r3 - 1) & 15;
3060 tcg_gen_movi_i64(t2, 4);
3061 while (r1 != r3) {
3062 r1 = (r1 + 1) & 15;
3063 tcg_gen_add_i64(o->in2, o->in2, t2);
3064 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3065 store_reg32h_i64(r1, t1);
3067 return DISAS_NEXT;
3070 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3072 int r1 = get_field(s, r1);
3073 int r3 = get_field(s, r3);
3074 TCGv_i64 t1, t2;
3076 /* Only one register to read. */
3077 if (unlikely(r1 == r3)) {
3078 tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3079 return DISAS_NEXT;
3082 /* First load the values of the first and last registers to trigger
3083 possible page faults. */
3084 t1 = tcg_temp_new_i64();
3085 t2 = tcg_temp_new_i64();
3086 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3087 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3088 tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3089 tcg_gen_mov_i64(regs[r1], t1);
3091 /* Only two registers to read. */
3092 if (((r1 + 1) & 15) == r3) {
3093 return DISAS_NEXT;
3096 /* Then load the remaining registers. Page fault can't occur. */
3097 r3 = (r3 - 1) & 15;
3098 tcg_gen_movi_i64(t1, 8);
3099 while (r1 != r3) {
3100 r1 = (r1 + 1) & 15;
3101 tcg_gen_add_i64(o->in2, o->in2, t1);
3102 tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3104 return DISAS_NEXT;
3107 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3109 TCGv_i64 a1, a2;
3110 MemOp mop = s->insn->data;
3112 /* In a parallel context, stop the world and single step. */
3113 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3114 update_psw_addr(s);
3115 update_cc_op(s);
3116 gen_exception(EXCP_ATOMIC);
3117 return DISAS_NORETURN;
3120 /* In a serial context, perform the two loads ... */
3121 a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3122 a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3123 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3124 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3126 /* ... and indicate that we performed them while interlocked. */
3127 gen_op_movi_cc(s, 0);
3128 return DISAS_NEXT;
3131 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3133 o->out_128 = tcg_temp_new_i128();
3134 tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
3135 MO_TE | MO_128 | MO_ALIGN);
3136 return DISAS_NEXT;
3139 #ifndef CONFIG_USER_ONLY
3140 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3142 tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3143 return DISAS_NEXT;
3145 #endif
3147 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3149 tcg_gen_andi_i64(o->out, o->in2, -256);
3150 return DISAS_NEXT;
3153 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3155 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3157 if (get_field(s, m3) > 6) {
3158 gen_program_exception(s, PGM_SPECIFICATION);
3159 return DISAS_NORETURN;
3162 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3163 tcg_gen_neg_i64(o->addr1, o->addr1);
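/* -(addr | -block_size) == block_size - (addr % block_size), the
   distance to the next boundary: e.g. addr 0x1005 with a 64-byte
   block gives 0x1005 | -64 = ...ffc5, negated = 59.  The result is
   then capped at 16, the size of a vector register. */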
3164 tcg_gen_movi_i64(o->out, 16);
3165 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3166 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3167 return DISAS_NEXT;
3170 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3172 const uint8_t monitor_class = get_field(s, i2);
3174 if (monitor_class & 0xf0) {
3175 gen_program_exception(s, PGM_SPECIFICATION);
3176 return DISAS_NORETURN;
3179 #if !defined(CONFIG_USER_ONLY)
3180 gen_helper_monitor_call(tcg_env, o->addr1,
3181 tcg_constant_i32(monitor_class));
3182 #endif
3183 /* Defaults to a NOP. */
3184 return DISAS_NEXT;
3187 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3189 o->out = o->in2;
3190 o->in2 = NULL;
3191 return DISAS_NEXT;
3194 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3196 int b2 = get_field(s, b2);
3197 TCGv ar1 = tcg_temp_new_i64();
3198 int r1 = get_field(s, r1);
3200 o->out = o->in2;
3201 o->in2 = NULL;
3203 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3204 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3205 tcg_gen_movi_i64(ar1, 0);
3206 break;
3207 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3208 tcg_gen_movi_i64(ar1, 1);
3209 break;
3210 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3211 if (b2) {
3212 tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
3213 } else {
3214 tcg_gen_movi_i64(ar1, 0);
3216 break;
3217 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3218 tcg_gen_movi_i64(ar1, 2);
3219 break;
3222 tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
3223 return DISAS_NEXT;
3226 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3228 o->out = o->in1;
3229 o->out2 = o->in2;
3230 o->in1 = NULL;
3231 o->in2 = NULL;
3232 return DISAS_NEXT;
3235 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3237 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3239 gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
3240 return DISAS_NEXT;
3243 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3245 gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
3246 return DISAS_NEXT;
3249 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3251 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3253 gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
3254 return DISAS_NEXT;
3257 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3259 int r1 = get_field(s, r1);
3260 int r2 = get_field(s, r2);
3261 TCGv_i32 t1, t2;
3263 /* r1 and r2 must be even. */
3264 if (r1 & 1 || r2 & 1) {
3265 gen_program_exception(s, PGM_SPECIFICATION);
3266 return DISAS_NORETURN;
3269 t1 = tcg_constant_i32(r1);
3270 t2 = tcg_constant_i32(r2);
3271 gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3272 set_cc_static(s);
3273 return DISAS_NEXT;
3276 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3278 int r1 = get_field(s, r1);
3279 int r3 = get_field(s, r3);
3280 TCGv_i32 t1, t3;
3282 /* r1 and r3 must be even. */
3283 if (r1 & 1 || r3 & 1) {
3284 gen_program_exception(s, PGM_SPECIFICATION);
3285 return DISAS_NORETURN;
3288 t1 = tcg_constant_i32(r1);
3289 t3 = tcg_constant_i32(r3);
3290 gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
3291 set_cc_static(s);
3292 return DISAS_NEXT;
3295 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3297 int r1 = get_field(s, r1);
3298 int r3 = get_field(s, r3);
3299 TCGv_i32 t1, t3;
3301 /* r1 and r3 must be even. */
3302 if (r1 & 1 || r3 & 1) {
3303 gen_program_exception(s, PGM_SPECIFICATION);
3304 return DISAS_NORETURN;
3307 t1 = tcg_constant_i32(r1);
3308 t3 = tcg_constant_i32(r3);
3309 gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
3310 set_cc_static(s);
3311 return DISAS_NEXT;
3314 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3316 int r3 = get_field(s, r3);
3317 gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
3318 set_cc_static(s);
3319 return DISAS_NEXT;
3322 #ifndef CONFIG_USER_ONLY
3323 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3325 int r1 = get_field(s, l1);
3326 int r3 = get_field(s, r3);
3327 gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
3328 set_cc_static(s);
3329 return DISAS_NEXT;
3332 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3334 int r1 = get_field(s, l1);
3335 int r3 = get_field(s, r3);
3336 gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
3337 set_cc_static(s);
3338 return DISAS_NEXT;
3340 #endif
3342 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3344 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3346 gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
3347 return DISAS_NEXT;
3350 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3352 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3354 gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
3355 return DISAS_NEXT;
3358 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3360 TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3361 TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3363 gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
3364 set_cc_static(s);
3365 return DISAS_NEXT;
3368 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3370 TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3371 TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3373 gen_helper_mvst(cc_op, tcg_env, t1, t2);
3374 set_cc_static(s);
3375 return DISAS_NEXT;
3378 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3380 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3382 gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
3383 return DISAS_NEXT;
3386 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3388 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3389 return DISAS_NEXT;
3392 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3394 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3395 return DISAS_NEXT;
3398 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3400 tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3401 return DISAS_NEXT;
3404 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3406 gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
3407 return DISAS_NEXT;
3410 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3412 gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
3413 return DISAS_NEXT;
3416 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3418 gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
3419 return DISAS_NEXT;
3422 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3424 gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
3425 return DISAS_NEXT;
3428 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3430 gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
3431 return DISAS_NEXT;
3434 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3436 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3437 gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
3438 return DISAS_NEXT;
3441 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3443 TCGv_i64 r3 = load_freg(get_field(s, r3));
3444 gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
3445 return DISAS_NEXT;
3448 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3450 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3451 gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
3452 return DISAS_NEXT;
3455 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3457 TCGv_i64 r3 = load_freg(get_field(s, r3));
3458 gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
3459 return DISAS_NEXT;
3462 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3464 TCGv_i64 z = tcg_constant_i64(0);
3465 TCGv_i64 n = tcg_temp_new_i64();
3467 tcg_gen_neg_i64(n, o->in2);
3468 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3469 return DISAS_NEXT;
3472 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3474 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3475 return DISAS_NEXT;
3478 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3480 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3481 return DISAS_NEXT;
3484 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3486 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3487 tcg_gen_mov_i64(o->out2, o->in2);
3488 return DISAS_NEXT;
3491 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3493 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3495 gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
3496 set_cc_static(s);
3497 return DISAS_NEXT;
3500 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3502 tcg_gen_neg_i64(o->out, o->in2);
3503 return DISAS_NEXT;
3506 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3508 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3509 return DISAS_NEXT;
3512 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3514 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3515 return DISAS_NEXT;
3518 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3520 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3521 tcg_gen_mov_i64(o->out2, o->in2);
3522 return DISAS_NEXT;
3525 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3527 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3529 gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
3530 set_cc_static(s);
3531 return DISAS_NEXT;
3534 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3536 tcg_gen_or_i64(o->out, o->in1, o->in2);
3537 return DISAS_NEXT;
3540 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3542 int shift = s->insn->data & 0xff;
3543 int size = s->insn->data >> 8;
3544 uint64_t mask = ((1ull << size) - 1) << shift;
3545 TCGv_i64 t = tcg_temp_new_i64();
3547 tcg_gen_shli_i64(t, o->in2, shift);
3548 tcg_gen_or_i64(o->out, o->in1, t);
3550 /* Produce the CC from only the bits manipulated. */
3551 tcg_gen_andi_i64(cc_dst, o->out, mask);
3552 set_cc_nz_u64(s, cc_dst);
3553 return DISAS_NEXT;
3556 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3558 o->in1 = tcg_temp_new_i64();
3560 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3561 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3562 } else {
3563 /* Perform the atomic operation in memory. */
3564 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3565 s->insn->data);
3568 /* Recompute also for atomic case: needed for setting CC. */
3569 tcg_gen_or_i64(o->out, o->in1, o->in2);
3571 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3572 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3574 return DISAS_NEXT;
3577 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3579 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3581 gen_helper_pack(tcg_env, l, o->addr1, o->in2);
3582 return DISAS_NEXT;
3585 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3587 int l2 = get_field(s, l2) + 1;
3588 TCGv_i32 l;
3590 /* The length must not exceed 32 bytes. */
3591 if (l2 > 32) {
3592 gen_program_exception(s, PGM_SPECIFICATION);
3593 return DISAS_NORETURN;
3595 l = tcg_constant_i32(l2);
3596 gen_helper_pka(tcg_env, o->addr1, o->in2, l);
3597 return DISAS_NEXT;
3600 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3602 int l2 = get_field(s, l2) + 1;
3603 TCGv_i32 l;
3605 /* The length must be even and must not exceed 64 bytes. */
3606 if ((l2 & 1) || (l2 > 64)) {
3607 gen_program_exception(s, PGM_SPECIFICATION);
3608 return DISAS_NORETURN;
3610 l = tcg_constant_i32(l2);
3611 gen_helper_pku(tcg_env, o->addr1, o->in2, l);
3612 return DISAS_NEXT;
3615 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3617 const uint8_t m3 = get_field(s, m3);
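/* Without miscellaneous-instruction-extensions 3 (or with the m3 bit
   valued 8 clear), POPCNT counts the one bits in each byte separately,
   which the helper implements; with both, the result is a single
   64-bit population count, i.e. a plain ctpop. */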
3619 if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3620 tcg_gen_ctpop_i64(o->out, o->in2);
3621 } else {
3622 gen_helper_popcnt(o->out, o->in2);
3624 return DISAS_NEXT;
3627 #ifndef CONFIG_USER_ONLY
3628 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3630 gen_helper_ptlb(tcg_env);
3631 return DISAS_NEXT;
3633 #endif
3635 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3637 int i3 = get_field(s, i3);
3638 int i4 = get_field(s, i4);
3639 int i5 = get_field(s, i5);
3640 int do_zero = i4 & 0x80;
3641 uint64_t mask, imask, pmask;
3642 int pos, len, rot;
3644 /* Adjust the arguments for the specific insn. */
3645 switch (s->fields.op2) {
3646 case 0x55: /* risbg */
3647 case 0x59: /* risbgn */
3648 i3 &= 63;
3649 i4 &= 63;
3650 pmask = ~0;
3651 break;
3652 case 0x5d: /* risbhg */
3653 i3 &= 31;
3654 i4 &= 31;
3655 pmask = 0xffffffff00000000ull;
3656 break;
3657 case 0x51: /* risblg */
3658 i3 = (i3 & 31) + 32;
3659 i4 = (i4 & 31) + 32;
3660 pmask = 0x00000000ffffffffull;
3661 break;
3662 default:
3663 g_assert_not_reached();
3666 /* MASK is the set of bits to be inserted from R2. */
3667 if (i3 <= i4) {
3668 /* [0...i3---i4...63] */
3669 mask = (-1ull >> i3) & (-1ull << (63 - i4));
3670 } else {
3671 /* [0---i4...i3---63] */
3672 mask = (-1ull >> i3) | (-1ull << (63 - i4));
3674 /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
3675 mask &= pmask;
3677 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3678 insns, we need to keep the other half of the register. */
3679 imask = ~mask | ~pmask;
3680 if (do_zero) {
3681 imask = ~pmask;
3684 len = i4 - i3 + 1;
3685 pos = 63 - i4;
3686 rot = i5 & 63;
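/* Illustrative example: RISBG with i3 = 48, i4 = 63, i5 = 16 and the
   zero flag set gives mask = 0xffff, imask = 0, len = 16, pos = 0 and
   rot = 16; since len <= rot, the whole operation collapses to
   tcg_gen_extract_i64(o->out, o->in2, 48, 16) below. */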
3688 /* In some cases we can implement this with extract. */
3689 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3690 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3691 return DISAS_NEXT;
3694 /* In some cases we can implement this with deposit. */
3695 if (len > 0 && (imask == 0 || ~mask == imask)) {
3696 /* Note that we rotate the bits to be inserted to the lsb, not to
3697 the position as described in the PoO. */
3698 rot = (rot - pos) & 63;
3699 } else {
3700 pos = -1;
3703 /* Rotate the input as necessary. */
3704 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3706 /* Insert the selected bits into the output. */
3707 if (pos >= 0) {
3708 if (imask == 0) {
3709 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3710 } else {
3711 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3713 } else if (imask == 0) {
3714 tcg_gen_andi_i64(o->out, o->in2, mask);
3715 } else {
3716 tcg_gen_andi_i64(o->in2, o->in2, mask);
3717 tcg_gen_andi_i64(o->out, o->out, imask);
3718 tcg_gen_or_i64(o->out, o->out, o->in2);
3720 return DISAS_NEXT;
3723 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3725 int i3 = get_field(s, i3);
3726 int i4 = get_field(s, i4);
3727 int i5 = get_field(s, i5);
3728 TCGv_i64 orig_out;
3729 uint64_t mask;
3731 /* If this is a test-only form, arrange to discard the result. */
3732 if (i3 & 0x80) {
3733 tcg_debug_assert(o->out != NULL);
3734 orig_out = o->out;
3735 o->out = tcg_temp_new_i64();
3736 tcg_gen_mov_i64(o->out, orig_out);
3739 i3 &= 63;
3740 i4 &= 63;
3741 i5 &= 63;
3743 /* MASK is the set of bits to be operated on from R2.
3744 Take care for I3/I4 wraparound. */
3745 mask = ~0ull >> i3;
3746 if (i3 <= i4) {
3747 mask ^= ~0ull >> i4 >> 1;
3748 } else {
3749 mask |= ~(~0ull >> i4 >> 1);
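/* E.g. i3 = 60, i4 = 3 wraps around: mask = (~0ull >> 60) |
   ~(~0ull >> 3 >> 1) = 0x000000000000000f | 0xf000000000000000,
   i.e. PoO bit ranges 60-63 and 0-3. */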
3752 /* Rotate the input as necessary. */
3753 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3755 /* Operate. */
3756 switch (s->fields.op2) {
3757 case 0x54: /* AND */
3758 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3759 tcg_gen_and_i64(o->out, o->out, o->in2);
3760 break;
3761 case 0x56: /* OR */
3762 tcg_gen_andi_i64(o->in2, o->in2, mask);
3763 tcg_gen_or_i64(o->out, o->out, o->in2);
3764 break;
3765 case 0x57: /* XOR */
3766 tcg_gen_andi_i64(o->in2, o->in2, mask);
3767 tcg_gen_xor_i64(o->out, o->out, o->in2);
3768 break;
3769 default:
3770 abort();
3773 /* Set the CC. */
3774 tcg_gen_andi_i64(cc_dst, o->out, mask);
3775 set_cc_nz_u64(s, cc_dst);
3776 return DISAS_NEXT;
3779 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3781 tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3782 return DISAS_NEXT;
3785 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3787 tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3788 return DISAS_NEXT;
3791 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3793 tcg_gen_bswap64_i64(o->out, o->in2);
3794 return DISAS_NEXT;
3797 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3799 TCGv_i32 t1 = tcg_temp_new_i32();
3800 TCGv_i32 t2 = tcg_temp_new_i32();
3801 TCGv_i32 to = tcg_temp_new_i32();
3802 tcg_gen_extrl_i64_i32(t1, o->in1);
3803 tcg_gen_extrl_i64_i32(t2, o->in2);
3804 tcg_gen_rotl_i32(to, t1, t2);
3805 tcg_gen_extu_i32_i64(o->out, to);
3806 return DISAS_NEXT;
3809 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3811 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3812 return DISAS_NEXT;
3815 #ifndef CONFIG_USER_ONLY
3816 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3818 gen_helper_rrbe(cc_op, tcg_env, o->in2);
3819 set_cc_static(s);
3820 return DISAS_NEXT;
3823 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3825 gen_helper_sacf(tcg_env, o->in2);
3826 /* Addressing mode has changed, so end the block. */
3827 return DISAS_TOO_MANY;
3829 #endif
3831 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3833 int sam = s->insn->data;
3834 TCGv_i64 tsam;
3835 uint64_t mask;
3837 switch (sam) {
3838 case 0:
3839 mask = 0xffffff;
3840 break;
3841 case 1:
3842 mask = 0x7fffffff;
3843 break;
3844 default:
3845 mask = -1;
3846 break;
3849 /* Bizarre but true, we check the address of the current insn for the
3850 specification exception, not the next to be executed. Thus the PoO
3851 documents that Bad Things Happen two bytes before the end. */
3852 if (s->base.pc_next & ~mask) {
3853 gen_program_exception(s, PGM_SPECIFICATION);
3854 return DISAS_NORETURN;
3856 s->pc_tmp &= mask;
3858 tsam = tcg_constant_i64(sam);
3859 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
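/* The insn->data value is also the new address-mode encoding: 0
   (SAM24), 1 (SAM31) or 3 (SAM64), deposited into the PSW EA/BA
   bits (PoO bits 31 and 32). */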
3861 /* Always exit the TB, since we (may have) changed execution mode. */
3862 return DISAS_TOO_MANY;
3865 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3867 int r1 = get_field(s, r1);
3868 tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
3869 return DISAS_NEXT;
3872 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3874 gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
3875 return DISAS_NEXT;
3878 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3880 gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
3881 return DISAS_NEXT;
3884 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3886 gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
3887 return DISAS_NEXT;
3890 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3892 gen_helper_sqeb(o->out, tcg_env, o->in2);
3893 return DISAS_NEXT;
3896 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3898 gen_helper_sqdb(o->out, tcg_env, o->in2);
3899 return DISAS_NEXT;
3902 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3904 gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
3905 return DISAS_NEXT;
3908 #ifndef CONFIG_USER_ONLY
3909 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3911 gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
3912 set_cc_static(s);
3913 return DISAS_NEXT;
3916 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3918 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3919 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3921 gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
3922 set_cc_static(s);
3923 return DISAS_NEXT;
3925 #endif
3927 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3929 DisasCompare c;
3930 TCGv_i64 a, h;
3931 TCGLabel *lab;
3932 int r1;
3934 disas_jcc(s, &c, get_field(s, m3));
3936 /* We want to store when the condition is fulfilled, so branch
3937    out when it is not. */
3938 c.cond = tcg_invert_cond(c.cond);
3940 lab = gen_new_label();
3941 if (c.is_64) {
3942 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3943 } else {
3944 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3947 r1 = get_field(s, r1);
3948 a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3949 switch (s->insn->data) {
3950 case 1: /* STOCG */
3951 tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3952 break;
3953 case 0: /* STOC */
3954 tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3955 break;
3956 case 2: /* STOCFH */
3957 h = tcg_temp_new_i64();
3958 tcg_gen_shri_i64(h, regs[r1], 32);
3959 tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3960 break;
3961 default:
3962 g_assert_not_reached();
3965 gen_set_label(lab);
3966 return DISAS_NEXT;
3969 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3971 TCGv_i64 t;
3972 uint64_t sign = 1ull << s->insn->data;
3973 if (s->insn->data == 31) {
3974 t = tcg_temp_new_i64();
3975 tcg_gen_shli_i64(t, o->in1, 32);
3976 } else {
3977 t = o->in1;
3979 gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
3980 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3981 /* The arithmetic left shift is curious in that it does not affect
3982 the sign bit. Copy that over from the source unchanged. */
3983 tcg_gen_andi_i64(o->out, o->out, ~sign);
3984 tcg_gen_andi_i64(o->in1, o->in1, sign);
3985 tcg_gen_or_i64(o->out, o->out, o->in1);
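/* E.g. a 32-bit SLA of 0x80000001 by 1 yields 0x80000002 in the
   32-bit result: bit 31 is copied from the source, while CC_OP_SLA
   inspects the pre-shift value (shifted up so its sign sits at bit
   63 for the 32-bit form) to detect overflow. */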
3986 return DISAS_NEXT;
3989 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3991 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3992 return DISAS_NEXT;
3995 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3997 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3998 return DISAS_NEXT;
4001 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4003 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4004 return DISAS_NEXT;
4007 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4009 gen_helper_sfpc(tcg_env, o->in2);
4010 return DISAS_NEXT;
4013 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4015 gen_helper_sfas(tcg_env, o->in2);
4016 return DISAS_NEXT;
4019 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4021 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4022 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4023 gen_helper_srnm(tcg_env, o->addr1);
4024 return DISAS_NEXT;
4027 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4029 /* Bits 0-55 are ignored. */
4030 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4031 gen_helper_srnm(tcg_env, o->addr1);
4032 return DISAS_NEXT;
4035 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4037 TCGv_i64 tmp = tcg_temp_new_i64();
4039 /* Bits other than 61-63 are ignored. */
4040 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4042 /* No need to call a helper: we don't implement DFP. */
4043 tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
4044 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4045 tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
4046 return DISAS_NEXT;
4049 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4051 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4052 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4053 set_cc_static(s);
4055 tcg_gen_shri_i64(o->in1, o->in1, 24);
4056 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4057 return DISAS_NEXT;
4060 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4062 int b1 = get_field(s, b1);
4063 int d1 = get_field(s, d1);
4064 int b2 = get_field(s, b2);
4065 int d2 = get_field(s, d2);
4066 int r3 = get_field(s, r3);
4067 TCGv_i64 tmp = tcg_temp_new_i64();
4069 /* fetch all operands first */
4070 o->in1 = tcg_temp_new_i64();
4071 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4072 o->in2 = tcg_temp_new_i64();
4073 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4074 o->addr1 = tcg_temp_new_i64();
4075 gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4077 /* load the third operand into r3 before modifying anything */
4078 tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4080 /* subtract CPU timer from first operand and store in GR0 */
4081 gen_helper_stpt(tmp, tcg_env);
4082 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4084 /* store second operand in GR1 */
4085 tcg_gen_mov_i64(regs[1], o->in2);
4086 return DISAS_NEXT;
4089 #ifndef CONFIG_USER_ONLY
4090 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4092 tcg_gen_shri_i64(o->in2, o->in2, 4);
4093 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4094 return DISAS_NEXT;
4097 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4099 gen_helper_sske(tcg_env, o->in1, o->in2);
4100 return DISAS_NEXT;
4103 static void gen_check_psw_mask(DisasContext *s)
4105 TCGv_i64 reserved = tcg_temp_new_i64();
4106 TCGLabel *ok = gen_new_label();
4108 tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4109 tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4110 gen_program_exception(s, PGM_SPECIFICATION);
4111 gen_set_label(ok);
4114 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4116 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4118 gen_check_psw_mask(s);
4120 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4121 s->exit_to_mainloop = true;
4122 return DISAS_TOO_MANY;
4125 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4127 tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
4128 return DISAS_NEXT;
4130 #endif
4132 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4134 gen_helper_stck(o->out, tcg_env);
4135 /* ??? We don't implement clock states. */
4136 gen_op_movi_cc(s, 0);
4137 return DISAS_NEXT;
4140 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4142 TCGv_i64 c1 = tcg_temp_new_i64();
4143 TCGv_i64 c2 = tcg_temp_new_i64();
4144 TCGv_i64 todpr = tcg_temp_new_i64();
4145 gen_helper_stck(c1, tcg_env);
4146 /* 16-bit value stored in a uint32_t (only valid bits set) */
4147 tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
4148 /* Shift the 64-bit value into its place as a zero-extended
4149 104-bit value. Note that "bit positions 64-103 are always
4150 non-zero so that they compare differently to STCK"; we set
4151 the least significant bit to 1. */
4152 tcg_gen_shli_i64(c2, c1, 56);
4153 tcg_gen_shri_i64(c1, c1, 8);
4154 tcg_gen_ori_i64(c2, c2, 0x10000);
4155 tcg_gen_or_i64(c2, c2, todpr);
4156 tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4157 tcg_gen_addi_i64(o->in2, o->in2, 8);
4158 tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4159 /* ??? We don't implement clock states. */
4160 gen_op_movi_cc(s, 0);
4161 return DISAS_NEXT;
4164 #ifndef CONFIG_USER_ONLY
4165 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4167 gen_helper_sck(cc_op, tcg_env, o->in2);
4168 set_cc_static(s);
4169 return DISAS_NEXT;
4172 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4174 gen_helper_sckc(tcg_env, o->in2);
4175 return DISAS_NEXT;
4178 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4180 gen_helper_sckpf(tcg_env, regs[0]);
4181 return DISAS_NEXT;
4184 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4186 gen_helper_stckc(o->out, tcg_env);
4187 return DISAS_NEXT;
4190 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4192 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4193 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4195 gen_helper_stctg(tcg_env, r1, o->in2, r3);
4196 return DISAS_NEXT;
4199 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4201 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4202 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4204 gen_helper_stctl(tcg_env, r1, o->in2, r3);
4205 return DISAS_NEXT;
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env);
    }
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}

static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}

static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}
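
/*
 * Note that the r1..r3 walk in op_stm above (and op_stmh below) wraps
 * modulo 16, which is what "(r1 + 1) & 15" implements: a range such as
 * r14..r3 touches r14, r15, r0, r1, r2, r3 in that order.
 */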
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}

static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}

static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
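
/*
 * A worked example for compute_borrow(): s390x sets CC 2 or 3 after a
 * SUBTRACT LOGICAL when no borrow occurred, so the msb of CC is the
 * carry and cc_src becomes 1 - 1 = 0; on a borrow, CC has its msb
 * clear and cc_src becomes 0 - 1 = -1.  SUBTRACT LOGICAL WITH BORROW
 * can then simply add cc_src into the difference, as the ops below do.
 */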
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}

static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY

static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif

static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(0xff);

    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
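
/*
 * TEST AND SET atomically exchanges the byte with 0xff and derives the
 * CC from the byte's former leftmost bit: e.g. 0x00 yields CC 0, while
 * 0x80 (leftmost bit already set) yields CC 1.
 */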
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and must not exceed 64 bytes. */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
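
/*
 * The inline path above covers the classic "XC x(l),x" clearing idiom,
 * e.g. "XC 0(8,%r1),0(%r1)" zeroes the doubleword at 0(%r1) with plain
 * stores and no helper call.
 */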
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
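
/*
 * For op_xori, insn->data encodes where the immediate sits in the
 * register: the low byte is the shift and the upper bits the size.
 * An insn that flips the high 32 bits of a 64-bit register, for
 * instance, would encode shift 32 and size 32, and only those bits
 * feed the CC computation above.
 */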
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

#include "translate_vx.c.inc"

/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */
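
/*
 * The model stays lazy: a "cout" generator usually just records the
 * operation kind in cc_op and the relevant values in the cc_src/cc_dst
 * globals; the CC itself is only materialized once a consumer calls
 * gen_op_calc_cc().
 */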
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn. */

static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn. */

static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

static void in2_ra2_E(DisasContext *s, DisasOps *o)
{
    return in2_ra2(s, o);
}
#define SPEC_in2_ra2_E SPEC_r2_even

static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}
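
/*
 * gen_ri2() materializes a relative-immediate I2 operand: the signed
 * halfword count is scaled by 2 and added to the address of the current
 * insn, so e.g. an immediate of 0x100 in a LARL-style insn yields
 * pc + 0x200.  When disas_jdest() reports the operand as non-immediate
 * (as can happen under EXECUTE), the TCGv it computed is returned
 * unchanged.
 */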
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0
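
/*
 * Shift counts are specified as an address, but only its rightmost six
 * bits are used: "SLLG %r1,%r2,3(%r3)" shifts by (3 + %r3) & 63, which
 * is what the masking above implements; the b2 == 0 case merely folds
 * the mask into the constant.
 */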
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
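
/*
 * As a rough illustration (see insn-data.h.inc for the real entries),
 * an entry along the lines of
 *     C(0x1400, NR, RR_a, Z, r1, r2, new, r1_32, nr, nz32)
 * is expanded three times by the successive definitions of E below:
 * once into the enum constant insn_NR, once into a DisasInsn wiring up
 * in1_r1, in2_r2, prep_new, wout_r1_32, op_nr and cout_nz32, and once
 * into a "case 0x1400:" arm of lookup_opc().
 */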
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
    .opc = OPC, \
    .flags = FL, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z S390_FEAT_ZARCH
#define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP S390_FEAT_DFP
#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE S390_FEAT_EXECUTE_EXT
#define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
#define FAC_HW S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE S390_FEAT_STFLE
#define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH S390_FEAT_DAT_ENH
#define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V S390_FEAT_VECTOR /* vector facility */
#define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
#define FAC_VE2 S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3 S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */

static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
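
/*
 * For example, with the insn left-aligned, a field at big-endian bit
 * position B of size S is just (insn << B) >> (64 - S); the r1 field
 * of an RR-format insn (bits 8-11) comes out as (insn << 8) >> 60 no
 * matter how long the insn actually is.
 */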
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}

/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn. */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}

static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN. */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(tcg_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}

static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    uint64_t insn = cpu_lduw_code(env, pc);

    return pc + get_ilen((insn >> 8) & 0xff);
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cpu_env(cs);
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->ex_value ||
            !is_same_page(dcbase, dc->base.pc_next) ||
            !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cs, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately target_disas can't use host memory. */
        fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
        target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}

void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date. */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];
}