/*
 * target/s390x: Emulate CVDG
 * (scraped from qemu.git: target/s390x/tcg/translate.c)
 */
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "exec/translator.h"
45 #include "exec/log.h"
46 #include "qemu/atomic128.h"
48 #define HELPER_H "helper.h"
49 #include "exec/helper-info.c.inc"
50 #undef HELPER_H
53 /* Information that (most) every instruction needs to manipulate. */
54 typedef struct DisasContext DisasContext;
55 typedef struct DisasInsn DisasInsn;
56 typedef struct DisasFields DisasFields;
/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

/* "Original" field indices: one per architectural field name; used as bit
   positions in the presentO availability bitmap. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
/* "Compact" field indices: fields that never coexist in one instruction
   format share a slot in DisasFields.c[], keeping the array at 7 entries. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
/* Decoded operand fields of the current instruction. */
struct DisasFields {
    uint64_t raw_insn;        /* the (up to 48-bit) instruction as fetched */
    unsigned op:8;            /* primary opcode byte */
    unsigned op2:8;           /* secondary opcode byte, if any */
    unsigned presentC:16;     /* bitmap over compact indices present */
    unsigned int presentO;    /* bitmap over original indices present */
    int c[NUM_C_FIELD];       /* field values, indexed by FLD_C_* */
};
/* Per-translation-block state threaded through the decoder. */
struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;     /* table entry for the insn being translated */
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;         /* EXECUTE target value, when non-zero */

    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;             /* length in bytes of the current insn */
    enum cc_op cc_op;          /* how the condition code is currently encoded */
    bool exit_to_mainloop;
};
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;                /* selects which union member is valid */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters: how often a branch condition could (hit) or could
   not (miss) be tested inline without materialising the CC value. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
/*
 * Store the link information for PC into OUT, honouring the addressing
 * mode: 64-bit mode stores the address as-is; 31-bit mode sets bit 32
 * (the AMODE bit) and preserves OUT's high word; 24-bit mode likewise
 * deposits only the low 32 bits.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
/* TCG globals mapping the emulated PSW, condition-code state and the 16
   general registers onto CPUS390XState fields; created in
   s390x_translate_init(). */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;           /* PER breaking-event address register */

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
/* One-time setup: bind the TCG global variables above to their backing
   fields in CPUS390XState. */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Registers are named "r0".."r15" for the TCG dumper. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
/* Byte offset of the full 16-byte vector register REG within CPUS390XState. */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
/* Byte offset of element ENR (of size ES) of vector register REG. */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
/* Offset of the 64-bit FP register REG (the high doubleword of vreg REG). */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
/* Offset of the 32-bit FP register REG (the high word of vreg REG). */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
282 static TCGv_i64 load_reg(int reg)
284 TCGv_i64 r = tcg_temp_new_i64();
285 tcg_gen_mov_i64(r, regs[reg]);
286 return r;
289 static TCGv_i64 load_freg(int reg)
291 TCGv_i64 r = tcg_temp_new_i64();
293 tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
294 return r;
297 static TCGv_i64 load_freg32_i64(int reg)
299 TCGv_i64 r = tcg_temp_new_i64();
301 tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
302 return r;
305 static TCGv_i128 load_freg_128(int reg)
307 TCGv_i64 h = load_freg(reg);
308 TCGv_i64 l = load_freg(reg + 2);
309 TCGv_i128 r = tcg_temp_new_i128();
311 tcg_gen_concat_i64_i128(r, l, h);
312 return r;
/* Store V into the full 64 bits of GPR REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
/* Store V into the 64-bit FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}
/* Store the low 32 bits of V into GPR REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
/* Store the low 32 bits of V into the HIGH half of GPR REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
/* Store the low 32 bits of V into the 32-bit FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}
/* Synchronise the emulated psw.addr with the current translation PC. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
/*
 * Record an unconditional branch for PER: update the breaking-event
 * address and, if PER is active, call the per_branch helper with the
 * branch target (the next sequential insn when TO_NEXT, else psw_addr).
 * No-op for user-only builds.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}
/*
 * Record a conditional branch for PER.  With PER enabled, branch around
 * the helper call when COND(arg1, arg2) is false; otherwise just update
 * gbea conditionally via movcond.  No-op for user-only builds.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}
/* Record the current PC as a PER breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
/* Flush the compile-time cc_op into the cc_op global, unless the CC is
   already dynamic or static (in which case env->cc_op is authoritative). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
/* Fetch a 2-byte (halfword) chunk of instruction text at PC. */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}
/* Fetch a 4-byte (word) chunk of instruction text at PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
402 static int get_mem_index(DisasContext *s)
404 #ifdef CONFIG_USER_ONLY
405 return MMU_USER_IDX;
406 #else
407 if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
408 return MMU_REAL_IDX;
411 switch (s->base.tb->flags & FLAG_MASK_ASC) {
412 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
413 return MMU_PRIMARY_IDX;
414 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
415 return MMU_SECONDARY_IDX;
416 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
417 return MMU_HOME_IDX;
418 default:
419 g_assert_not_reached();
420 break;
422 #endif
/* Raise CPU exception EXCP via the exception helper (does not return
   at runtime). */
static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}
/* Raise a program exception with interruption code CODE, saving the
   insn length, PSW address and condition code first. */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was. */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}
/* Raise a data exception with DXC 0xff (AFP/vector trap). */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
/* DST = SRC + IMM, then wrap the result to the current addressing mode:
   31 bits in 31-bit mode, 24 bits in 24-bit mode, untouched in 64-bit. */
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
/* Compute the effective address base(b2) + index(x2) + displacement(d2)
   into a fresh temporary, wrapped to the current addressing mode.
   Register number 0 means "no base/index register". */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
506 static inline bool live_cc_data(DisasContext *s)
508 return (s->cc_op != CC_OP_DYNAMIC
509 && s->cc_op != CC_OP_STATIC
510 && s->cc_op > 3);
/* Set the condition code to the constant VAL (0..3). */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
/* Record a 1-operand CC computation: OP applied to DST (kept in cc_dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
/* Record a 2-operand CC computation: OP applied to SRC and DST. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
/* Record a 3-operand CC computation: OP applied to SRC, DST and VR. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
/* Set CC from a zero/non-zero test of VAL. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
/* Materialise the deferred condition code into the cc_op global,
   dispatching on how many source operands the current cc_op needs
   (0/1/2/3), then mark the CC as static. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed (constant or already materialised). */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
/* May we chain to DEST with goto_tb?  Never while PER is active, since
   every branch must be able to trigger a breaking event. */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}
/* Debug accounting: condition could not be tested inline for CC_OP. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
/* Debug accounting: condition was tested inline for CC_OP. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because bit 0 (CC=3) is ignored. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  First tries to map
   (cc_op, mask) to an inline comparison on cc_src/cc_dst; otherwise
   falls back to materialising the CC and testing it dynamically. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Always-taken and never-taken need no operands at all. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* CC has been materialised; pick the cheapest test of cc_op
           that implements the 4-bit MASK. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
/* ====================================================================== */
/* Define the insn format enumeration.  The F0..F6 macros expand each
   entry of insn-format.h.inc into a FMT_<name> enumerator; the field
   arguments are ignored here and reused below for format_info[]. */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Is original-index field C present in the decoded instruction? */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/* Fetch the value of field O from compact slot C; asserts presence. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;           /* first bit of the field in the insn */
    unsigned int size:8;          /* width in bits */
    unsigned int type:2;          /* 0=unsigned, 1=signed, 2=scaled, 3=vector */
    unsigned int indexC:6;        /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original index, for presence bitmap */
} DisasField;

/* All (up to NUM_C_FIELD) field layouts of one instruction format. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
/* Field-layout helper macros: each expands to one or more DisasField
   initialisers (register, mask, vector, base+displacement, base+index+
   displacement, long-displacement variants, immediate, length). */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Re-expand insn-format.h.inc, this time into the per-format field table. */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    TCGv_i64 out, out2, in1, in2;   /* 64-bit operand slots */
    TCGv_i64 addr1;                 /* effective address of operand 1 */
    TCGv_i128 out_128, in1_128, in2_128; /* 128-bit operand slots */
} DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB. */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB. */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values. */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
/* One decode-table entry: opcode, format, facility/spec constraints and
   the helper pipeline (in1/in2/prep -> op -> wout/cout) implementing it. */
struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;          /* IF_* instruction flags */
    DisasFormat fmt:8;
    unsigned fac:8;             /* required facility */
    unsigned spec:8;            /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP. */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself. */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;              /* per-insn constant passed via s->insn */
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */

/* Emit an unconditional branch to DEST: fall through when DEST is the
   next insn, chain with goto_tb when permitted, else exit with the PC
   updated. */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
/*
 * Emit a (possibly conditional) branch under condition C.  IS_IMM selects
 * a PC-relative target (IMM halfwords from the insn address) versus an
 * indirect target in CDEST.  Picks the best code shape available:
 * trivial cases first, then goto_tb on both edges, goto_tb on the
 * fallthru edge only, and finally a movcond-based PC update.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next. */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch. */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1272 /* ====================================================================== */
1273 /* The operations. These perform the bulk of the work for any insn,
1274 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE (integer): out = |in2|. */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value. */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit value. */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* LOAD POSITIVE (extended BFP): sign lives in the high doubleword (in1);
   the low doubleword (in2) passes through unchanged. */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* Generic integer addition; CC handling is done by the insn's cc hook. */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* Unsigned 64-bit add producing a carry-out: after the add2, cc_src
   holds the carry (0 or 1) for later CC_OP_ADDU computation. */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* cc_src holds the borrow as (0,-1); +1 converts it to a carry. */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC first, then fall through to extract carry. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
/* 32-bit add-with-carry: plain 64-bit adds suffice since only the low
   32 bits of the result are kept by the output hook. */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* 64-bit add-with-carry; two add2 steps accumulate the carry-out in
   cc_src for the subsequent CC computation. */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
/* ADD (signed) to storage.  Without the interlocked-access facility
   (STFLE bit 45) this is a plain load/add/store; with it, the add is
   performed atomically in memory. */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();

    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* ADD LOGICAL to storage (64-bit): like op_asi, but the recomputation
   also leaves the carry-out in cc_src for the unsigned CC. */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();

    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* BFP add, short precision: delegated to the softfloat helper. */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, long precision. */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, extended (128-bit) precision. */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
/* Bitwise AND; CC is handled by the insn's cc hook. */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* AND IMMEDIATE against one sub-field of a register.  insn->data packs
   the field's bit offset (low byte) and width (next byte); bits outside
   the field are preserved by OR-ing the shifted immediate with ~mask. */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
/* AND WITH COMPLEMENT: out = in1 & ~in2. */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR WITH COMPLEMENT: out = in1 | ~in2. */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NAND: out = ~(in1 & in2). */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOR: out = ~(in1 | in2). */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT XOR (equivalence): out = ~(in1 ^ in2). */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* AND to storage (NI and friends).  Mirrors op_asi: non-atomic
   load/and/store without the interlocked-access-2 facility, otherwise
   an atomic fetch-and in memory. */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* BRANCH AND SAVE: store the link info, then branch to in2 if it is
   non-NULL (an R2 of 0 produced no target, hence no branch). */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
/* Build the BAL-style link information in o->out.  In 31/64-bit mode
   this is the ordinary link info; in 24-bit mode the high word is kept
   and ILC, the program-mask nibble from the PSW, and the CC are packed
   into bits 32..39 alongside the return address. */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    /* ILC (instruction length in halfwords) and next address. */
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask from the PSW. */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code. */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
/* BRANCH AND LINK: like op_bas but with the legacy link format. */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            /* Relative branch under EXECUTE: base it on ex_target. */         \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)
/* BRANCH RELATIVE AND SAVE: save the link, then take an unconditional
   (mask 0xf) branch to the relative target. */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON CONDITION.  BCR with R2 == 0 never branches but certain
   mask values act as serialization points. */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low word of R1 and branch if
   the result is non-zero. */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of R1. */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT HIGH: like op_bct32 but operating on the high word
   of R1; the target is always a relative immediate. */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch if
   non-zero. */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXLE/BXH selected via insn->data):
   R1 += R3, then compare the low word of R1 against the low word of
   the odd register of the R3 pair. */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit).  If R1 aliases the comparand register
   (r3|1), snapshot its value before the in-place add clobbers it. */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH / COMPARE AND TRAP relatives (CRJ and friends):
   compare in1 with in2 under the m3 condition; insn->data selects
   unsigned comparison.  The target may be a relative immediate or a
   base+displacement address (b4/d4). */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* Register-and-storage form: compute the branch address. */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* BFP compare, short precision; the helper returns the CC. */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, long precision. */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, extended (128-bit) precision. */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* Extract and validate the m3 (rounding mode) and m4 fields of an FP
   instruction, packing them into one i32 (m3 in bits 0-3, m4 in bits
   4-7).  Fields not architected before the floating-point-extension
   facility are forced to zero.  Returns NULL (after raising a
   specification exception) for invalid rounding modes. */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
/* CONVERT TO FIXED, BFP source to 32-bit signed result.  All six
   converters below share the same shape: validate the m3/m4 fields,
   call the helper, and latch the CC it produced. */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Long BFP to 32-bit signed. */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Extended BFP to 32-bit signed. */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Short BFP to 64-bit signed. */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Long BFP to 64-bit signed. */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Extended BFP to 64-bit signed. */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* CONVERT TO LOGICAL (unsigned) family: same pattern as the signed
   converters above, but neither m3 nor m4 depends on FPE here. */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Long BFP to 32-bit unsigned. */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Extended BFP to 32-bit unsigned. */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Short BFP to 64-bit unsigned. */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Long BFP to 64-bit unsigned. */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Extended BFP to 64-bit unsigned. */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* CONVERT FROM FIXED (signed 64-bit source) to short BFP.  These do
   not set the CC, hence no set_cc_static(). */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Signed 64-bit to long BFP. */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Signed 64-bit to extended BFP. */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
/* CONVERT FROM LOGICAL (unsigned 64-bit source) to short BFP. */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Unsigned 64-bit to long BFP. */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Unsigned 64-bit to extended BFP. */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
/* CHECKSUM: the helper returns a 128-bit pair of (result, bytes
   consumed); advance the R2 address/length pair by the consumed
   length afterwards. */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 bytes,
   inline two loads and an unsigned compare; otherwise call the
   byte-loop helper. */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32 of the byte count gives the MemOp size. */
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
2034 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2036 int r1 = get_field(s, r1);
2037 int r2 = get_field(s, r2);
2038 TCGv_i32 t1, t2;
2040 /* r1 and r2 must be even. */
2041 if (r1 & 1 || r2 & 1) {
2042 gen_program_exception(s, PGM_SPECIFICATION);
2043 return DISAS_NORETURN;
2046 t1 = tcg_constant_i32(r1);
2047 t2 = tcg_constant_i32(r2);
2048 gen_helper_clcl(cc_op, tcg_env, t1, t2);
2049 set_cc_static(s);
2050 return DISAS_NEXT;
2053 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2055 int r1 = get_field(s, r1);
2056 int r3 = get_field(s, r3);
2057 TCGv_i32 t1, t3;
2059 /* r1 and r3 must be even. */
2060 if (r1 & 1 || r3 & 1) {
2061 gen_program_exception(s, PGM_SPECIFICATION);
2062 return DISAS_NORETURN;
2065 t1 = tcg_constant_i32(r1);
2066 t3 = tcg_constant_i32(r3);
2067 gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
2068 set_cc_static(s);
2069 return DISAS_NEXT;
2072 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2074 int r1 = get_field(s, r1);
2075 int r3 = get_field(s, r3);
2076 TCGv_i32 t1, t3;
2078 /* r1 and r3 must be even. */
2079 if (r1 & 1 || r3 & 1) {
2080 gen_program_exception(s, PGM_SPECIFICATION);
2081 return DISAS_NORETURN;
2084 t1 = tcg_constant_i32(r1);
2085 t3 = tcg_constant_i32(r3);
2086 gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
2087 set_cc_static(s);
2088 return DISAS_NEXT;
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the selected
   bytes of the low word of in1 against storage at in2. */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* COMPARE LOGICAL STRING: helper returns the updated operand addresses
   packed in an i128, written back to in2/in1. */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
/* COPY SIGN: combine the sign bit of in1 with the magnitude of in2. */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    return DISAS_NEXT;
}
/* COMPARE AND SWAP: an aligned atomic cmpxchg; CC is 0 on match,
   1 on mismatch. */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
/* COMPARE DOUBLE AND SWAP (128-bit): atomic cmpxchg of the R1 pair
   against storage, swapping in the R3 pair. */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
/* COMPARE AND SWAP AND STORE: dispatched to the parallel or serial
   helper depending on whether the TB runs under parallel CPUs. */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): cmpxchg on the masked
   address; on a successful compare with the LSB of R2 set, purge the
   TLB on all CPUs. */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Align the address by masking off the low bits of the size. */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
#endif
/* CONVERT TO DECIMAL: convert the low 32 bits of in1 to a packed
   decimal doubleword and store it at in2. */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}
/* CONVERT TO DECIMAL (64-bit source, CVDG): the helper produces a
   128-bit packed decimal, stored big-endian at in2. */
static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    gen_helper_cvdg(t, o->in1);
    tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
    return DISAS_NEXT;
}
/* COMPARE AND TRAP: branch around the trap when the inverted condition
   holds; insn->data selects the unsigned variant. */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
/* CONVERT UNICODE family (CU12/CU14/CU21/CU24/CU41/CU42); insn->data
   encodes the source/destination format pair.  The m3 well-formedness
   check only exists with the ETF3-enhancement facility. */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2310 #ifndef CONFIG_USER_ONLY
2311 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2313 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2314 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2315 TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2317 gen_helper_diag(tcg_env, r1, r3, func_code);
2318 return DISAS_NEXT;
2320 #endif
/* Signed 32-bit divide: helper returns remainder:quotient packed in
   one 64-bit value, split into out2 (remainder) and out (quotient). */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* Unsigned 32-bit divide; same packing as op_divs32. */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* Signed 64-bit divide: helper returns a 128-bit remainder:quotient. */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

/* Unsigned 128/64 divide: dividend is the out:out2 pair. */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
/* BFP divide, short precision. */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP divide, long precision. */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP divide, extended (128-bit) precision. */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
/* EXTRACT ACCESS REGISTER: zero-extended load of access register r2. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

/* EXTRACT CPU ATTRIBUTE: we model no cache topology, so return -1. */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* EXTRACT FPC: read the floating-point control register. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
/* EXTRACT PSW: R1 gets the high word of the PSW mask with the CC
   deposited at bits 12-13; R2 (if non-zero) gets the low word. */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
/* EXECUTE: run the instruction at in2 with its second byte modified by
   the low byte of R1 (R1 == 0 means no modification).  Nested EXECUTE
   is forbidden.  The helper stashes the target and forces a re-entry
   into the translator, hence DISAS_PC_CC_UPDATED. */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
/* LOAD FP INTEGER, short precision: round to an integral value per the
   m3/m4 fields. */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD FP INTEGER, long precision. */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD FP INTEGER, extended precision. */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
/* FIND LEFTMOST ONE: R1 gets the bit number of the leftmost one (64 if
   none), R1+1 gets the input with that bit cleared. */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
/*
 * INSERT CHARACTERS UNDER MASK: insert up to 4 bytes from memory into
 * the byte positions of r1 selected by the m3 mask.  Contiguous masks
 * are handled as one wider load; sparse masks fall back to a byte-wise
 * load-and-deposit loop.  CC is derived from the inserted bits only.
 */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        /* Deposit the loaded value at the position implied by the mask. */
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts. */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
/* Insert an immediate field: shift/size are encoded in insn->data. */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}

/* INSERT PROGRAM MASK: bits 24-31 of r1 get the CC and program mask. */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* Program mask lives in psw_mask bits 40-43 (from msb 20-23). */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    /* Combine CC (bits 4-5 of the byte) with the program mask. */
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY; m4 is honored only with local-TLB-clearing. */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_idte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

/* INVALIDATE PAGE TABLE ENTRY; m4 handling as for IDTE. */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

/* INSERT STORAGE KEY EXTENDED. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
#endif
/*
 * Message-security-assist family (KM, KMC, KIMD, KMA, ...): validate
 * the register operands required by the specific function type encoded
 * in insn->data, then defer to the msa helper which sets the CC.
 * The switch falls through so that each type inherits the checks of
 * the types below it.
 */
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        /* KMA additionally requires r3 distinct from r1 and r2. */
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        /* r3 must be an even, non-zero register pair. */
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        /* r1 must be an even, non-zero register pair. */
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        /* r2 must be an even, non-zero register pair. */
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_constant_i32(r1);
    t_r2 = tcg_constant_i32(r2);
    t_r3 = tcg_constant_i32(r3);
    type = tcg_constant_i32(s->insn->data);
    gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* COMPARE AND SIGNAL (short BFP): compare, CC set by helper. */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE AND SIGNAL (long BFP). */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE AND SIGNAL (extended BFP, 128-bit operands). */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
/*
 * LOAD AND ADD (interlocked update).  The atomic fetch-add leaves the
 * prior memory value in o->in2; the sum is then recomputed locally
 * because the CC logic needs it.  With addu64 the add is done with an
 * explicit carry out (for the logical/unsigned CC variant).
 */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    if (addu64) {
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}

/* LOAD AND ADD (signed CC). */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}

/* LOAD AND ADD LOGICAL (unsigned CC with carry). */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}

/* LOAD AND AND (interlocked update). */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the operation for the computation of CC. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND OR (interlocked update). */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* Recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND EXCLUSIVE OR (interlocked update). */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* Recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* LOAD LENGTHENED (short BFP -> long BFP). */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* LOAD ROUNDED (long -> short BFP) with rounding mode from m3. */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD ROUNDED (extended -> long BFP). */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

/* LOAD ROUNDED (extended -> short BFP). */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

/* LOAD LENGTHENED (long BFP -> extended BFP). */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* LOAD LENGTHENED (short BFP -> extended BFP). */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
/* Place a 32-bit value into the high half of an FP register slot. */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS: clear bits above 31. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}

/* Sign-extending 8-bit memory load. */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}

/* Zero-extending 8-bit memory load. */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Sign-extending 16-bit memory load. */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}

/* Zero-extending 16-bit memory load. */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* Sign-extending 32-bit load; extra MemOp flags come from insn->data. */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}

/* Zero-extending 32-bit load; extra MemOp flags come from insn->data. */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* 64-bit load; extra MemOp flags come from insn->data. */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
/* LOAD AND TRAP (32-bit): store the value, then trap if it was zero. */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD AND TRAP (64-bit). */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD HIGH AND TRAP: high half of r1 gets the value. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL AND TRAP (32 -> 64 zero-extended). */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
/*
 * LOAD ON CONDITION / SELECT: conditionally pick between the two
 * inputs based on the m3 (LOC) or m4 (SELECT) condition mask.
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* 32-bit comparison: widen the setcond result and movcond on it. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit control registers r1..r3). */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* LOAD CONTROL (64-bit control registers r1..r3). */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* LOAD REAL ADDRESS; CC is set by the helper. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* LOAD PROGRAM PARAMETER. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

/* LOAD PSW (short format). */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}

/* LOAD PSW EXTENDED: 16-byte PSW, mask then address. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
#endif
/* LOAD ACCESS MULTIPLE: access registers r1..r3 from memory. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
/*
 * LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16).
 * The first and last registers are loaded up front so that any access
 * exception is taken before architectural state is partially updated.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}

/*
 * LOAD MULTIPLE HIGH: like op_lm32 but the values land in the high
 * halves of the 64-bit registers.
 */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
/*
 * LOAD MULTIPLE (64-bit): same fault-first strategy as op_lm32, but
 * writing the full 64-bit registers directly.
 */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
/*
 * LOAD PAIR DISJOINT: two interlocked loads from independent addresses.
 * Under parallel execution we cannot make the pair atomic, so stop the
 * world and retry in the serial (exclusive) context.
 */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* LOAD PAIR FROM QUADWORD: one aligned 128-bit load. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: bypass DAT via the real-mode MMU index. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
#endif

/* LOAD AND ZERO RIGHTMOST BYTE: clear the low 8 bits. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
/*
 * LOAD COUNT TO BLOCK BOUNDARY: bytes remaining until the next
 * 2^(m3+6) boundary, capped at 16; CC reflects whether it was capped.
 */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* distance-to-boundary = -(addr | -block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
/* MONITOR CALL: validated monitor class; a NOP for user-only builds. */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    /* Only classes 0-15 are valid. */
    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
/* Generic move: steal in2 as the output without copying. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}

/*
 * MOVE with access-register update: move in2 to out and set access
 * register r1 according to the current address-space control.
 */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            /* Copy the access register of the base register. */
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

/* Move a register pair: steal both inputs as outputs. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
/* MOVE (character): l1+1 bytes from second operand to first. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE RIGHT TO LEFT: length comes from register 0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE INVERSE. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
/* MOVE LONG: r1/r2 designate even register pairs; CC set by helper. */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_mvcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE LONG EXTENDED: r1/r3 designate even register pairs. */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE LONG UNICODE: r1/r3 designate even register pairs. */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MOVE WITH OPTIONAL SPECIFICATIONS; operand controls are in r3. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: length in the register selected by the l1 field. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE TO SECONDARY: length in the register selected by the l1 field. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
/* MOVE NUMERICS. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE WITH OFFSET. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE PAGE; register 0 supplies the optional controls. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE STRING. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE ZONES. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
/* 64x64 -> 64-bit multiply. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned 64x64 -> 128-bit multiply; out:out2 is the high:low pair. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Signed 64x64 -> 128-bit multiply. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (short BFP). */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (short -> long BFP). */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (long BFP). */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (extended BFP, 128-bit operands). */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* MULTIPLY (long -> extended BFP). */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY AND ADD (short BFP). */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND ADD (long BFP). */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND SUBTRACT (short BFP). */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND SUBTRACT (long BFP). */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
/* LOAD NEGATIVE: out = -(abs(in2)). */
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z = tcg_constant_i64(0);
    TCGv_i64 n = tcg_temp_new_i64();

    tcg_gen_neg_i64(n, o->in2);
    /* Select the negated value when the input is non-negative. */
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (short FP): force the sign bit on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (long FP). */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (extended FP): sign bit is in the high doubleword. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* AND (character): memory-to-memory AND, CC set by helper. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (short FP): flip the sign bit. */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (long FP). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (extended FP): sign bit is in the high doubleword. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
/* OR (character): memory-to-memory OR, CC set by helper. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* OR (register/register). */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR immediate into a sub-field; shift/size are encoded in insn->data. */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

/* OR to memory; atomic when interlocked-access-2 is available. */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* PACK. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* PACK ASCII. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pka(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}

/* PACK UNICODE. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes. */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pku(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
/*
 * POPULATION COUNT: with m3 bit 8 and miscellaneous-instruction-
 * extensions-3, a full 64-bit popcount; otherwise the per-byte helper.
 */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
/* PURGE TLB. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
#endif
/*
 * ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG).
 * I3/I4 select the destination bit range (with wraparound), I5 is the
 * rotate amount; I4 bit 0x80 ("zero" flag) clears the bits of R1 not
 * selected.  Where possible the operation is lowered to a single
 * extract or deposit TCG op.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: merge masked source into masked destination. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3757 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3759 int i3 = get_field(s, i3);
3760 int i4 = get_field(s, i4);
3761 int i5 = get_field(s, i5);
3762 TCGv_i64 orig_out;
3763 uint64_t mask;
3765 /* If this is a test-only form, arrange to discard the result. */
3766 if (i3 & 0x80) {
3767 tcg_debug_assert(o->out != NULL);
3768 orig_out = o->out;
3769 o->out = tcg_temp_new_i64();
3770 tcg_gen_mov_i64(o->out, orig_out);
3773 i3 &= 63;
3774 i4 &= 63;
3775 i5 &= 63;
3777 /* MASK is the set of bits to be operated on from R2.
3778 Take care for I3/I4 wraparound. */
3779 mask = ~0ull >> i3;
3780 if (i3 <= i4) {
3781 mask ^= ~0ull >> i4 >> 1;
3782 } else {
3783 mask |= ~(~0ull >> i4 >> 1);
3786 /* Rotate the input as necessary. */
3787 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3789 /* Operate. */
3790 switch (s->fields.op2) {
3791 case 0x54: /* AND */
3792 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3793 tcg_gen_and_i64(o->out, o->out, o->in2);
3794 break;
3795 case 0x56: /* OR */
3796 tcg_gen_andi_i64(o->in2, o->in2, mask);
3797 tcg_gen_or_i64(o->out, o->out, o->in2);
3798 break;
3799 case 0x57: /* XOR */
3800 tcg_gen_andi_i64(o->in2, o->in2, mask);
3801 tcg_gen_xor_i64(o->out, o->out, o->in2);
3802 break;
3803 default:
3804 abort();
3807 /* Set the CC. */
3808 tcg_gen_andi_i64(cc_dst, o->out, mask);
3809 set_cc_nz_u64(s, cc_dst);
3810 return DISAS_NEXT;
/* Byte-reverse the low 16 bits; input and output are zero-extended. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-reverse the low 32 bits; input and output are zero-extended. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-reverse the full 64-bit operand. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate the low word of in1 by in2,
   zero-extending the 32-bit result into the 64-bit output. */
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    return DISAS_NEXT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: the helper computes the CC from the
   previous storage-key state. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS SPACE CONTROL FAST. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
#endif
/* SET ADDRESSING MODE: insn->data selects 24-bit (0), 31-bit (1) or
   64-bit (other) mode; update the AM bits of the PSW mask. */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* Address mask that the current PC must fit within. */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    /* Insert the two addressing-mode bits into the PSW mask. */
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
/* SET ACCESS REGISTER: store the low 32 bits of in2 into access reg R1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

/* SUBTRACT (short BFP). */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT (long BFP). */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT (extended BFP, 128-bit operands). */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* SQUARE ROOT (short BFP). */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (long BFP). */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (extended BFP, 128-bit operand). */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): helper sets the CC. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIGNAL PROCESSOR: order code and parameters go through the helper. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
/* STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data):
   perform the store only when the M3 condition holds. */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 for the 32-bit form, 63 for the 64-bit form). */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        /* For the 32-bit form, shift the value to the high half so the
           CC computation sees the relevant sign/overflow bits. */
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
/* SHIFT LEFT SINGLE LOGICAL. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* SET FPC: load the floating-point control register via the helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (2-bit form). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (3-bit form). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

/* SET DFP ROUNDING MODE: update the DRM field of the FPC directly. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}

/* SET PROGRAM MASK: CC from bits 28-29 of in1, program mask from 24-27. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
/* EXTRACT CPU TIME: loads the third operand into R3, stores the first
   operand minus the CPU timer into GR0 and the second operand into GR1. */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: insert address bits into the PSW key field. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Raise a specification exception at runtime if any reserved PSW-mask
   bits ended up set. */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}

/* SET SYSTEM MASK: replace PSW mask bits 0-7 from the operand. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE CPU ADDRESS: the core id serves as the CPU address. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
#endif
/* STORE CLOCK. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: 16-byte result combining the TOD clock with
   the TOD programmable register. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* 16 bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* SET CLOCK: helper returns the CC. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: operand taken from GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE CONTROL (64-bit): control registers R1 through R3. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): control registers R1 through R3. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CPU ID. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST (to the prefix area). */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE SYSTEM INFORMATION: function code/selector in GR0/GR1. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}
/* CANCEL SUBCHANNEL: subchannel id in GR1, as for all channel I/O insns. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL: SCHIB address in in2. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL: ORB address in in2. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL: SCHIB destination address in in2. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL: IRB destination address in in2. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* STORE PREFIX: mask the prefix register to its architected bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

/* STORE THEN AND/OR SYSTEM MASK (STNSM 0xac / STOSM). */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte. */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte. */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE USING REAL ADDRESS (width in insn->data). */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* PER storage-alteration event for the real-address store. */
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env);
    }
    return DISAS_NEXT;
}
#endif
/* STORE FACILITY LIST EXTENDED: helper sets the CC. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Store the low 8 bits of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Store the low 16 bits of in1 at address in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* 32-bit store; insn->data may add memop flags (e.g. alignment). */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* 64-bit store; insn->data may add memop flags (e.g. alignment). */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
/* STORE ACCESS MULTIPLE: access registers R1 through R3 via the helper. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by M3.
   insn->data gives the bit offset of the 32-bit field within the
   register (STCM vs STCMH).  Contiguous masks become one wide store. */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
/* STORE MULTIPLE: registers R1 through R3 (wrapping mod 16);
   insn->data is the per-register width (4 or 8). */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}

/* STORE MULTIPLE HIGH: store the high 32 bits of R1 through R3. */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        /* Shift the high half down... actually up; the MO_TEUL store
           then writes what was the register's high word. */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
/* STORE PAIR TO QUADWORD: aligned 128-bit store of the register pair. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}

/* SEARCH STRING: helper sets the CC. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* SUBTRACT. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL (64-bit): the borrow-out (0 or -1) lands in cc_src. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute borrow (0, -1) into cc_src.  */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}

/* SUBTRACT LOGICAL WITH BORROW (32-bit). */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL WITH BORROW (64-bit): new borrow tracked in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   CPU state, then raise the SVC exception. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4670 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4672 int cc = 0;
4674 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4675 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4676 gen_op_movi_cc(s, cc);
4677 return DISAS_NEXT;
/* TEST DATA CLASS (short BFP): helper sets the CC. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP). */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP, 128-bit first operand). */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY

/* TEST BLOCK. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif
/* TEST DECIMAL: L1 is the field length minus one. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED: helper returns the updated address pair. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low opcode
   bits encode the source/destination character sizes. */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* Without the ETF2-enhancement facility, M3 is ignored. */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* M3 bit 3 set: no test character; -1 never matches. */
        tcg_gen_movi_i32(tst, -1);
    } else {
        /* Test character comes from GR0, truncated to the source size. */
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST AND SET: atomically exchange 0xff into the byte; the CC is the
   former leftmost bit of that byte. */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(0xff);

    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* UNPACK: packed decimal to zoned decimal via the helper. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* UNPACK ASCII: L1 is the first-operand length minus one. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE: like UNPKA but producing 2-byte Unicode digits. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* EXCLUSIVE OR (character): operand XOR over storage.  When both
   operands name the same location the result is all zeros, which we
   can emit inline as a short run of stores; otherwise use the helper. */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        l++;
        /* Emit the widest stores first, narrowing as length remains. */
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* EXCLUSIVE OR (register forms). */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* EXCLUSIVE OR immediate on a sub-field of the register: insn->data
   packs the field's bit width (high byte) and shift (low byte). */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

/* EXCLUSIVE OR to storage (XI): atomic when interlocked-access-2 is
   available, otherwise load/xor/store; insn->data is the memop. */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* Produce a constant zero output (e.g. for zeroing register forms). */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}

/* As op_zero, but for insns with a 128-bit (pair) output. */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/*
 * z/PCI instructions (system emulation only).  Each of these simply
 * marshals the decoded register numbers into i32 constants, defers all
 * of the work to the corresponding out-of-line helper, and (where the
 * instruction sets a condition code) latches the CC from the helper.
 */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIC does not set a condition code. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
5029 #include "translate_vx.c.inc"
5031 /* ====================================================================== */
5032 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5033 the original inputs), update the various cc data structures in order to
5034 be able to compute the new condition code. */
/* Each cout_* helper records (into the cc_* globals) just enough state
   for the lazy CC machinery to reconstruct the condition code later. */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    /* The 64-bit result holds carry in bit 32: split it into
       carry (cc_src) and the 32-bit sum (cc_dst). */
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    /* cc_src (the carry) was already set by the op itself. */
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits of the output participate. */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    /* Arithmetic shift leaves cc_src as 0 (no borrow) or -1 (borrow). */
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    /* cc_src (the borrow indication) was already set by the op itself. */
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5187 /* ====================================================================== */
5188 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5189 with the TCG register to which we will write. Used in combination with
5190 the "wout" generators, in some cases we need a new temporary, and in
5191 some cases we can write to a TCG global. */
/* Allocate a fresh temp for a single 64-bit result. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temps for a pair of 64-bit results. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Allocate a fresh 128-bit temp. */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

/* Write directly into the r1 global (no copy-back needed). */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5226 /* ====================================================================== */
5227 /* The "Write OUTput" generators. These generally perform some non-trivial
5228 copy of data to TCG globals, or to main memory. The trivial cases are
5229 generally handled by having a "prep" generator install the TCG global
5230 as the destination of the operation. */
/* Store the full 64-bit output into general register r1. */
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

/* Store the secondary output into r1 (used when out2 holds the result). */
static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Insert only the low 8 bits of the output into r1. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the output into r1. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store into the low 32 bits of r1, preserving the high half. */
static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store into the high 32 bits of r1 (high-word facility). */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store a pair of 32-bit results into the even/odd pair r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit result across the even/odd pair:
   high half to r1, low half to r1+1. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Split a 128-bit result across the even/odd pair r1/r1+1. */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

/* 32-bit pair into the even/odd pair r3/r3+1. */
static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* 64-bit pair into the even/odd pair r3/r3+1. */
static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Short (32-bit) FP result into FPR r1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

/* Long (64-bit) FP result into FPR r1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Extended (128-bit) FP result into the FPR pair r1/r1+2. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* As wout_x1, but the result is already in out/out2. */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

/* Conditional store: skip when r1 == r2 (result already in place). */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Memory stores through addr1, in the various widths; the *a variants
   additionally require natural alignment (system emulation only). */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* 32-bit store through the second-operand address (in2). */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

/* Copy the (possibly modified) second input back into r1. */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5428 /* ====================================================================== */
5429 /* The "INput 1" generators. These load the first operand to an insn. */
/* Load r1 into a fresh temp (safe against later clobbers of the global). */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Alias the r1 global directly ("_o" = original, no copy). */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

/* Low 32 bits of r1, sign-extended. */
static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Low 32 bits of r1, zero-extended. */
static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted into the low half. */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the even/odd pair, copied into a temp. */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* The odd register of the pair, aliased directly. */
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit doubleword assembled from the 32-bit halves of r1 (high)
   and r1+1 (low). */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

/* Short (32-bit) FP operand from FPR r1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

/* Long (64-bit) FP operand from FPR r1. */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Extended (128-bit) FP operand from the FPR pair r1/r1+2. */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Effective address of the first operand (base b1 + displacement d1). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Effective address of the second operand, with optional index x2. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* Memory loads through the first-operand address, in various widths
   and extensions. */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5651 /* ====================================================================== */
5652 /* The "INput 2" generators. These load the second operand to an insn. */
/* Alias the r1 global directly as the second input. */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

/* Low 16 bits of r1, zero-extended. */
static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Low 32 bits of r1, zero-extended. */
static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Doubleword assembled from the 32-bit halves of r1 (high)
   and r1+1 (low). */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

/* Load r2 only when nonzero; r2 == 0 leaves in2 NULL
   (instructions that treat register 0 specially). */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* 128-bit value assembled from the even/odd pair r3/r3+1. */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Short (32-bit) FP operand from FPR r2. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

/* Long (64-bit) FP operand from FPR r2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Extended (128-bit) FP operand from the FPR pair r2/r2+2. */
static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value used as an address (wrapped per addressing mode). */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0
5815 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5817 return in2_ra2(s, o);
5819 #define SPEC_in2_ra2_E SPEC_r2_even
/* Effective address of the second operand (index x2 + base b2 + d2). */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/*
 * Resolve a relative-immediate branch/operand target: either a constant
 * (pc + imm*2) when i2 is an immediate, or the register chosen by
 * disas_jdest otherwise.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

/* Shift count operand: 6 bits of d2, or of the computed address
   when a base register is given. */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

/* Memory loads through the second-operand address, reusing in2 (which
   holds the address from in2_a2) as the destination. */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* Aligned variant (system emulation only). */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/* 64-bit load whose value is then wrapped per the addressing mode. */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* Memory loads through a relative-immediate address (gen_ri2). */
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

/* Immediate operands, in various zero-extensions and shifts. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction text itself, as an operand (e.g. for EXECUTE). */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
6014 /* ====================================================================== */
/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C/D/F are shorthands that expand to the full E entry with defaulted
   data/flags fields; insn-data.h.inc is included three times below with
   E redefined each time (enum, table entry, switch case). */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

/* First expansion: one enumerator per instruction. */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.h.inc"
};

/* Second expansion: a DisasInsn initializer per instruction. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */

static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

/* Third expansion: one switch case per opcode. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
6134 /* Extract a field from the insn. The INSN should be left-aligned in
6135 the uint64_t so that we can more easily utilize the big-bit-endian
6136 definitions we extract from the Principals of Operation. */
6138 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6140 uint32_t r, m;
6142 if (f->size == 0) {
6143 return;
6146 /* Zero extract the field from the insn. */
6147 r = (insn << f->beg) >> (64 - f->size);
6149 /* Sign-extend, or un-swap the field as necessary. */
6150 switch (f->type) {
6151 case 0: /* unsigned */
6152 break;
6153 case 1: /* signed */
6154 assert(f->size <= 32);
6155 m = 1u << (f->size - 1);
6156 r = (r ^ m) - m;
6157 break;
6158 case 2: /* dl+dh split, signed 20 bit. */
6159 r = ((int8_t)r << 12) | (r >> 8);
6160 break;
6161 case 3: /* MSB stored in RXB */
6162 g_assert(f->size == 4);
6163 switch (f->beg) {
6164 case 8:
6165 r |= extract64(insn, 63 - 36, 1) << 4;
6166 break;
6167 case 12:
6168 r |= extract64(insn, 63 - 37, 1) << 4;
6169 break;
6170 case 16:
6171 r |= extract64(insn, 63 - 38, 1) << 4;
6172 break;
6173 case 32:
6174 r |= extract64(insn, 63 - 39, 1) << 4;
6175 break;
6176 default:
6177 g_assert_not_reached();
6179 break;
6180 default:
6181 abort();
6185 * Validate that the "compressed" encoding we selected above is valid.
6186 * I.e. we haven't made two different original fields overlap.
6188 assert(((o->presentC >> f->indexC) & 1) == 0);
6189 o->presentC |= 1 << f->indexC;
6190 o->presentO |= 1 << f->indexO;
6192 o->c[f->indexC] = r;
6195 /* Lookup the insn at the current PC, extracting the operands into O and
6196 returning the info struct for the insn. Returns NULL for invalid insn. */
6198 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6200 uint64_t insn, pc = s->base.pc_next;
6201 int op, op2, ilen;
6202 const DisasInsn *info;
6204 if (unlikely(s->ex_value)) {
6205 /* Drop the EX data now, so that it's clear on exception paths. */
6206 tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
6207 offsetof(CPUS390XState, ex_value));
6209 /* Extract the values saved by EXECUTE. */
6210 insn = s->ex_value & 0xffffffffffff0000ull;
6211 ilen = s->ex_value & 0xf;
6213 /* Register insn bytes with translator so plugins work. */
6214 for (int i = 0; i < ilen; i++) {
6215 uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6216 translator_fake_ldb(byte, pc + i);
6218 op = insn >> 56;
6219 } else {
6220 insn = ld_code2(env, s, pc);
6221 op = (insn >> 8) & 0xff;
6222 ilen = get_ilen(op);
6223 switch (ilen) {
6224 case 2:
6225 insn = insn << 48;
6226 break;
6227 case 4:
6228 insn = ld_code4(env, s, pc) << 32;
6229 break;
6230 case 6:
6231 insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6232 break;
6233 default:
6234 g_assert_not_reached();
6237 s->pc_tmp = s->base.pc_next + ilen;
6238 s->ilen = ilen;
6240 /* We can't actually determine the insn format until we've looked up
6241 the full insn opcode. Which we can't do without locating the
6242 secondary opcode. Assume by default that OP2 is at bit 40; for
6243 those smaller insns that don't actually have a secondary opcode
6244 this will correctly result in OP2 = 0. */
6245 switch (op) {
6246 case 0x01: /* E */
6247 case 0x80: /* S */
6248 case 0x82: /* S */
6249 case 0x93: /* S */
6250 case 0xb2: /* S, RRF, RRE, IE */
6251 case 0xb3: /* RRE, RRD, RRF */
6252 case 0xb9: /* RRE, RRF */
6253 case 0xe5: /* SSE, SIL */
6254 op2 = (insn << 8) >> 56;
6255 break;
6256 case 0xa5: /* RI */
6257 case 0xa7: /* RI */
6258 case 0xc0: /* RIL */
6259 case 0xc2: /* RIL */
6260 case 0xc4: /* RIL */
6261 case 0xc6: /* RIL */
6262 case 0xc8: /* SSF */
6263 case 0xcc: /* RIL */
6264 op2 = (insn << 12) >> 60;
6265 break;
6266 case 0xc5: /* MII */
6267 case 0xc7: /* SMI */
6268 case 0xd0 ... 0xdf: /* SS */
6269 case 0xe1: /* SS */
6270 case 0xe2: /* SS */
6271 case 0xe8: /* SS */
6272 case 0xe9: /* SS */
6273 case 0xea: /* SS */
6274 case 0xee ... 0xf3: /* SS */
6275 case 0xf8 ... 0xfd: /* SS */
6276 op2 = 0;
6277 break;
6278 default:
6279 op2 = (insn << 40) >> 56;
6280 break;
6283 memset(&s->fields, 0, sizeof(s->fields));
6284 s->fields.raw_insn = insn;
6285 s->fields.op = op;
6286 s->fields.op2 = op2;
6288 /* Lookup the instruction. */
6289 info = lookup_opc(op << 8 | op2);
6290 s->insn = info;
6292 /* If we found it, extract the operands. */
6293 if (info != NULL) {
6294 DisasFormat fmt = info->fmt;
6295 int i;
6297 for (i = 0; i < NUM_C_FIELD; ++i) {
6298 extract_field(&s->fields, &format_info[fmt].op[i], insn);
6301 return info;
/*
 * True if REG is an additional-floating-point register, i.e. any FP
 * register other than the four basic ones (0, 2, 4, 6).
 */
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}
/*
 * True if REG can be the first register of a 128-bit FP register pair.
 * Valid pair leaders are 0,1,4,5,8,9,12,13; all invalid ones (and only
 * those) have bit 1 set, so a single bit test suffices.
 */
static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
6315 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6317 const DisasInsn *insn;
6318 DisasJumpType ret = DISAS_NEXT;
6319 DisasOps o = {};
6320 bool icount = false;
6322 /* Search for the insn in the table. */
6323 insn = extract_insn(env, s);
6325 /* Update insn_start now that we know the ILEN. */
6326 tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6328 /* Not found means unimplemented/illegal opcode. */
6329 if (insn == NULL) {
6330 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6331 s->fields.op, s->fields.op2);
6332 gen_illegal_opcode(s);
6333 ret = DISAS_NORETURN;
6334 goto out;
6337 #ifndef CONFIG_USER_ONLY
6338 if (s->base.tb->flags & FLAG_MASK_PER) {
6339 TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6340 gen_helper_per_ifetch(tcg_env, addr);
6342 #endif
6344 /* process flags */
6345 if (insn->flags) {
6346 /* privileged instruction */
6347 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6348 gen_program_exception(s, PGM_PRIVILEGED);
6349 ret = DISAS_NORETURN;
6350 goto out;
6353 /* if AFP is not enabled, instructions and registers are forbidden */
6354 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6355 uint8_t dxc = 0;
6357 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6358 dxc = 1;
6360 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6361 dxc = 1;
6363 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6364 dxc = 1;
6366 if (insn->flags & IF_BFP) {
6367 dxc = 2;
6369 if (insn->flags & IF_DFP) {
6370 dxc = 3;
6372 if (insn->flags & IF_VEC) {
6373 dxc = 0xfe;
6375 if (dxc) {
6376 gen_data_exception(dxc);
6377 ret = DISAS_NORETURN;
6378 goto out;
6382 /* if vector instructions not enabled, executing them is forbidden */
6383 if (insn->flags & IF_VEC) {
6384 if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6385 gen_data_exception(0xfe);
6386 ret = DISAS_NORETURN;
6387 goto out;
6391 /* input/output is the special case for icount mode */
6392 if (unlikely(insn->flags & IF_IO)) {
6393 icount = translator_io_start(&s->base);
6397 /* Check for insn specification exceptions. */
6398 if (insn->spec) {
6399 if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6400 (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6401 (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6402 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6403 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6404 gen_program_exception(s, PGM_SPECIFICATION);
6405 ret = DISAS_NORETURN;
6406 goto out;
6410 /* Implement the instruction. */
6411 if (insn->help_in1) {
6412 insn->help_in1(s, &o);
6414 if (insn->help_in2) {
6415 insn->help_in2(s, &o);
6417 if (insn->help_prep) {
6418 insn->help_prep(s, &o);
6420 if (insn->help_op) {
6421 ret = insn->help_op(s, &o);
6423 if (ret != DISAS_NORETURN) {
6424 if (insn->help_wout) {
6425 insn->help_wout(s, &o);
6427 if (insn->help_cout) {
6428 insn->help_cout(s, &o);
6432 /* io should be the last instruction in tb when icount is enabled */
6433 if (unlikely(icount && ret == DISAS_NEXT)) {
6434 ret = DISAS_TOO_MANY;
6437 #ifndef CONFIG_USER_ONLY
6438 if (s->base.tb->flags & FLAG_MASK_PER) {
6439 /* An exception might be triggered, save PSW if not already done. */
6440 if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6441 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6444 /* Call the helper to check for a possible PER exception. */
6445 gen_helper_per_check_exception(tcg_env);
6447 #endif
6449 out:
6450 /* Advance to the next instruction. */
6451 s->base.pc_next = s->pc_tmp;
6452 return ret;
6455 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6457 DisasContext *dc = container_of(dcbase, DisasContext, base);
6459 /* 31-bit mode */
6460 if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6461 dc->base.pc_first &= 0x7fffffff;
6462 dc->base.pc_next = dc->base.pc_first;
6465 dc->cc_op = CC_OP_DYNAMIC;
6466 dc->ex_value = dc->base.tb->cs_base;
6467 dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6470 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6474 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6476 DisasContext *dc = container_of(dcbase, DisasContext, base);
6478 /* Delay the set of ilen until we've read the insn. */
6479 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6480 dc->insn_start = tcg_last_op();
6483 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6484 uint64_t pc)
6486 uint64_t insn = cpu_lduw_code(env, pc);
6488 return pc + get_ilen((insn >> 8) & 0xff);
6491 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6493 CPUS390XState *env = cpu_env(cs);
6494 DisasContext *dc = container_of(dcbase, DisasContext, base);
6496 dc->base.is_jmp = translate_one(env, dc);
6497 if (dc->base.is_jmp == DISAS_NEXT) {
6498 if (dc->ex_value ||
6499 !is_same_page(dcbase, dc->base.pc_next) ||
6500 !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6501 dc->base.is_jmp = DISAS_TOO_MANY;
6506 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6508 DisasContext *dc = container_of(dcbase, DisasContext, base);
6510 switch (dc->base.is_jmp) {
6511 case DISAS_NORETURN:
6512 break;
6513 case DISAS_TOO_MANY:
6514 update_psw_addr(dc);
6515 /* FALLTHRU */
6516 case DISAS_PC_UPDATED:
6517 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6518 cc op type is in env */
6519 update_cc_op(dc);
6520 /* FALLTHRU */
6521 case DISAS_PC_CC_UPDATED:
6522 /* Exit the TB, either by raising a debug exception or by return. */
6523 if (dc->exit_to_mainloop) {
6524 tcg_gen_exit_tb(NULL, 0);
6525 } else {
6526 tcg_gen_lookup_and_goto_ptr();
6528 break;
6529 default:
6530 g_assert_not_reached();
6534 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6535 CPUState *cs, FILE *logfile)
6537 DisasContext *dc = container_of(dcbase, DisasContext, base);
6539 if (unlikely(dc->ex_value)) {
6540 /* ??? Unfortunately target_disas can't use host memory. */
6541 fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6542 } else {
6543 fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6544 target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6548 static const TranslatorOps s390x_tr_ops = {
6549 .init_disas_context = s390x_tr_init_disas_context,
6550 .tb_start = s390x_tr_tb_start,
6551 .insn_start = s390x_tr_insn_start,
6552 .translate_insn = s390x_tr_translate_insn,
6553 .tb_stop = s390x_tr_tb_stop,
6554 .disas_log = s390x_tr_disas_log,
6557 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6558 vaddr pc, void *host_pc)
6560 DisasContext dc;
6562 translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6565 void s390x_restore_state_to_opc(CPUState *cs,
6566 const TranslationBlock *tb,
6567 const uint64_t *data)
6569 S390CPU *cpu = S390_CPU(cs);
6570 CPUS390XState *env = &cpu->env;
6571 int cc_op = data[1];
6573 env->psw.addr = data[0];
6575 /* Update the CC opcode if it is not already up-to-date. */
6576 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6577 env->cc_op = cc_op;
6580 /* Record ILEN. */
6581 env->int_pgm_ilen = data[2];