Merge remote-tracking branch 'remotes/stefanberger/tags/pull-tpm-2020-07-24-1' into...
[qemu/ar7.git] / target / s390x / translate.c
blob4f6f1e31cdfd345542a40ad6b76f60b368b8dd56
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS compiles away entirely unless verbose disas debugging is on. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
46 #include "exec/translator.h"
47 #include "exec/log.h"
48 #include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;
/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
/*
 * Compact field indices: fields that can never coexist in one instruction
 * format share a slot, so only NUM_C_FIELD array entries are needed.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
130 struct DisasFields {
131 uint64_t raw_insn;
132 unsigned op:8;
133 unsigned op2:8;
134 unsigned presentC:16;
135 unsigned int presentO;
136 int c[NUM_C_FIELD];
139 struct DisasContext {
140 DisasContextBase base;
141 const DisasInsn *insn;
142 DisasFields fields;
143 uint64_t ex_value;
145 * During translate_one(), pc_tmp is used to determine the instruction
146 * to be executed after base.pc_next - e.g. next sequential instruction
147 * or a branch target.
149 uint64_t pc_tmp;
150 uint32_t ilen;
151 enum cc_op cc_op;
152 bool do_debug;
155 /* Information carried about a condition to be evaluated. */
156 typedef struct {
157 TCGCond cond:8;
158 bool is_64;
159 bool g1;
160 bool g2;
161 union {
162 struct { TCGv_i64 a, b; } s64;
163 struct { TCGv_i32 a, b; } s32;
164 } u;
165 } DisasCompare;
/* Hit/miss counters for the inline-branch optimisation, debug builds only. */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
172 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
174 TCGv_i64 tmp;
176 if (s->base.tb->flags & FLAG_MASK_32) {
177 if (s->base.tb->flags & FLAG_MASK_64) {
178 tcg_gen_movi_i64(out, pc);
179 return;
181 pc |= 0x80000000;
183 assert(!(s->base.tb->flags & FLAG_MASK_64));
184 tmp = tcg_const_i64(pc);
185 tcg_gen_deposit_i64(out, out, tmp, 0, 32);
186 tcg_temp_free_i64(tmp);
189 static TCGv_i64 psw_addr;
190 static TCGv_i64 psw_mask;
191 static TCGv_i64 gbea;
193 static TCGv_i32 cc_op;
194 static TCGv_i64 cc_src;
195 static TCGv_i64 cc_dst;
196 static TCGv_i64 cc_vr;
198 static char cpu_reg_names[16][4];
199 static TCGv_i64 regs[16];
201 void s390x_translate_init(void)
203 int i;
205 psw_addr = tcg_global_mem_new_i64(cpu_env,
206 offsetof(CPUS390XState, psw.addr),
207 "psw_addr");
208 psw_mask = tcg_global_mem_new_i64(cpu_env,
209 offsetof(CPUS390XState, psw.mask),
210 "psw_mask");
211 gbea = tcg_global_mem_new_i64(cpu_env,
212 offsetof(CPUS390XState, gbea),
213 "gbea");
215 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
216 "cc_op");
217 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
218 "cc_src");
219 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
220 "cc_dst");
221 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
222 "cc_vr");
224 for (i = 0; i < 16; i++) {
225 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
226 regs[i] = tcg_global_mem_new(cpu_env,
227 offsetof(CPUS390XState, regs[i]),
228 cpu_reg_names[i]);
232 static inline int vec_full_reg_offset(uint8_t reg)
234 g_assert(reg < 32);
235 return offsetof(CPUS390XState, vregs[reg][0]);
238 static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
240 /* Convert element size (es) - e.g. MO_8 - to bytes */
241 const uint8_t bytes = 1 << es;
242 int offs = enr * bytes;
245 * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
246 * of the 16 byte vector, on both, little and big endian systems.
248 * Big Endian (target/possible host)
249 * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
250 * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
251 * W: [ 0][ 1] - [ 2][ 3]
252 * DW: [ 0] - [ 1]
254 * Little Endian (possible host)
255 * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
256 * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
257 * W: [ 1][ 0] - [ 3][ 2]
258 * DW: [ 0] - [ 1]
260 * For 16 byte elements, the two 8 byte halves will not form a host
261 * int128 if the host is little endian, since they're in the wrong order.
262 * Some operations (e.g. xor) do not care. For operations like addition,
263 * the two 8 byte elements have to be loaded separately. Let's force all
264 * 16 byte operations to handle it in a special way.
266 g_assert(es <= MO_64);
267 #ifndef HOST_WORDS_BIGENDIAN
268 offs ^= (8 - bytes);
269 #endif
270 return offs + vec_full_reg_offset(reg);
273 static inline int freg64_offset(uint8_t reg)
275 g_assert(reg < 16);
276 return vec_reg_offset(reg, 0, MO_64);
279 static inline int freg32_offset(uint8_t reg)
281 g_assert(reg < 16);
282 return vec_reg_offset(reg, 0, MO_32);
285 static TCGv_i64 load_reg(int reg)
287 TCGv_i64 r = tcg_temp_new_i64();
288 tcg_gen_mov_i64(r, regs[reg]);
289 return r;
292 static TCGv_i64 load_freg(int reg)
294 TCGv_i64 r = tcg_temp_new_i64();
296 tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
297 return r;
300 static TCGv_i64 load_freg32_i64(int reg)
302 TCGv_i64 r = tcg_temp_new_i64();
304 tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
305 return r;
308 static void store_reg(int reg, TCGv_i64 v)
310 tcg_gen_mov_i64(regs[reg], v);
313 static void store_freg(int reg, TCGv_i64 v)
315 tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
318 static void store_reg32_i64(int reg, TCGv_i64 v)
320 /* 32 bit register writes keep the upper half */
321 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
324 static void store_reg32h_i64(int reg, TCGv_i64 v)
326 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
329 static void store_freg32_i64(int reg, TCGv_i64 v)
331 tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
334 static void return_low128(TCGv_i64 dest)
336 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
339 static void update_psw_addr(DisasContext *s)
341 /* psw.addr */
342 tcg_gen_movi_i64(psw_addr, s->base.pc_next);
345 static void per_branch(DisasContext *s, bool to_next)
347 #ifndef CONFIG_USER_ONLY
348 tcg_gen_movi_i64(gbea, s->base.pc_next);
350 if (s->base.tb->flags & FLAG_MASK_PER) {
351 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
352 gen_helper_per_branch(cpu_env, gbea, next_pc);
353 if (to_next) {
354 tcg_temp_free_i64(next_pc);
357 #endif
360 static void per_branch_cond(DisasContext *s, TCGCond cond,
361 TCGv_i64 arg1, TCGv_i64 arg2)
363 #ifndef CONFIG_USER_ONLY
364 if (s->base.tb->flags & FLAG_MASK_PER) {
365 TCGLabel *lab = gen_new_label();
366 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
368 tcg_gen_movi_i64(gbea, s->base.pc_next);
369 gen_helper_per_branch(cpu_env, gbea, psw_addr);
371 gen_set_label(lab);
372 } else {
373 TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
374 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
375 tcg_temp_free_i64(pc);
377 #endif
380 static void per_breaking_event(DisasContext *s)
382 tcg_gen_movi_i64(gbea, s->base.pc_next);
385 static void update_cc_op(DisasContext *s)
387 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
388 tcg_gen_movi_i32(cc_op, s->cc_op);
392 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
394 return (uint64_t)cpu_lduw_code(env, pc);
397 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
399 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
402 static int get_mem_index(DisasContext *s)
404 #ifdef CONFIG_USER_ONLY
405 return MMU_USER_IDX;
406 #else
407 if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
408 return MMU_REAL_IDX;
411 switch (s->base.tb->flags & FLAG_MASK_ASC) {
412 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
413 return MMU_PRIMARY_IDX;
414 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
415 return MMU_SECONDARY_IDX;
416 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
417 return MMU_HOME_IDX;
418 default:
419 tcg_abort();
420 break;
422 #endif
425 static void gen_exception(int excp)
427 TCGv_i32 tmp = tcg_const_i32(excp);
428 gen_helper_exception(cpu_env, tmp);
429 tcg_temp_free_i32(tmp);
432 static void gen_program_exception(DisasContext *s, int code)
434 TCGv_i32 tmp;
436 /* Remember what pgm exeption this was. */
437 tmp = tcg_const_i32(code);
438 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
439 tcg_temp_free_i32(tmp);
441 tmp = tcg_const_i32(s->ilen);
442 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
443 tcg_temp_free_i32(tmp);
445 /* update the psw */
446 update_psw_addr(s);
448 /* Save off cc. */
449 update_cc_op(s);
451 /* Trigger exception. */
452 gen_exception(EXCP_PGM);
455 static inline void gen_illegal_opcode(DisasContext *s)
457 gen_program_exception(s, PGM_OPERATION);
460 static inline void gen_data_exception(uint8_t dxc)
462 TCGv_i32 tmp = tcg_const_i32(dxc);
463 gen_helper_data_exception(cpu_env, tmp);
464 tcg_temp_free_i32(tmp);
467 static inline void gen_trap(DisasContext *s)
469 /* Set DXC to 0xff */
470 gen_data_exception(0xff);
473 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
474 int64_t imm)
476 tcg_gen_addi_i64(dst, src, imm);
477 if (!(s->base.tb->flags & FLAG_MASK_64)) {
478 if (s->base.tb->flags & FLAG_MASK_32) {
479 tcg_gen_andi_i64(dst, dst, 0x7fffffff);
480 } else {
481 tcg_gen_andi_i64(dst, dst, 0x00ffffff);
486 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
488 TCGv_i64 tmp = tcg_temp_new_i64();
491 * Note that d2 is limited to 20 bits, signed. If we crop negative
492 * displacements early we create larger immedate addends.
494 if (b2 && x2) {
495 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
496 gen_addi_and_wrap_i64(s, tmp, tmp, d2);
497 } else if (b2) {
498 gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
499 } else if (x2) {
500 gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
501 } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
502 if (s->base.tb->flags & FLAG_MASK_32) {
503 tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
504 } else {
505 tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
507 } else {
508 tcg_gen_movi_i64(tmp, d2);
511 return tmp;
514 static inline bool live_cc_data(DisasContext *s)
516 return (s->cc_op != CC_OP_DYNAMIC
517 && s->cc_op != CC_OP_STATIC
518 && s->cc_op > 3);
521 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
523 if (live_cc_data(s)) {
524 tcg_gen_discard_i64(cc_src);
525 tcg_gen_discard_i64(cc_dst);
526 tcg_gen_discard_i64(cc_vr);
528 s->cc_op = CC_OP_CONST0 + val;
531 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
533 if (live_cc_data(s)) {
534 tcg_gen_discard_i64(cc_src);
535 tcg_gen_discard_i64(cc_vr);
537 tcg_gen_mov_i64(cc_dst, dst);
538 s->cc_op = op;
541 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
542 TCGv_i64 dst)
544 if (live_cc_data(s)) {
545 tcg_gen_discard_i64(cc_vr);
547 tcg_gen_mov_i64(cc_src, src);
548 tcg_gen_mov_i64(cc_dst, dst);
549 s->cc_op = op;
552 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
553 TCGv_i64 dst, TCGv_i64 vr)
555 tcg_gen_mov_i64(cc_src, src);
556 tcg_gen_mov_i64(cc_dst, dst);
557 tcg_gen_mov_i64(cc_vr, vr);
558 s->cc_op = op;
561 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
563 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
566 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
568 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
571 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
573 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
576 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
578 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
581 /* CC value is in env->cc_op */
582 static void set_cc_static(DisasContext *s)
584 if (live_cc_data(s)) {
585 tcg_gen_discard_i64(cc_src);
586 tcg_gen_discard_i64(cc_dst);
587 tcg_gen_discard_i64(cc_vr);
589 s->cc_op = CC_OP_STATIC;
592 /* calculates cc into cc_op */
593 static void gen_op_calc_cc(DisasContext *s)
595 TCGv_i32 local_cc_op = NULL;
596 TCGv_i64 dummy = NULL;
598 switch (s->cc_op) {
599 default:
600 dummy = tcg_const_i64(0);
601 /* FALLTHRU */
602 case CC_OP_ADD_64:
603 case CC_OP_ADDU_64:
604 case CC_OP_ADDC_64:
605 case CC_OP_SUB_64:
606 case CC_OP_SUBU_64:
607 case CC_OP_SUBB_64:
608 case CC_OP_ADD_32:
609 case CC_OP_ADDU_32:
610 case CC_OP_ADDC_32:
611 case CC_OP_SUB_32:
612 case CC_OP_SUBU_32:
613 case CC_OP_SUBB_32:
614 local_cc_op = tcg_const_i32(s->cc_op);
615 break;
616 case CC_OP_CONST0:
617 case CC_OP_CONST1:
618 case CC_OP_CONST2:
619 case CC_OP_CONST3:
620 case CC_OP_STATIC:
621 case CC_OP_DYNAMIC:
622 break;
625 switch (s->cc_op) {
626 case CC_OP_CONST0:
627 case CC_OP_CONST1:
628 case CC_OP_CONST2:
629 case CC_OP_CONST3:
630 /* s->cc_op is the cc value */
631 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
632 break;
633 case CC_OP_STATIC:
634 /* env->cc_op already is the cc value */
635 break;
636 case CC_OP_NZ:
637 case CC_OP_ABS_64:
638 case CC_OP_NABS_64:
639 case CC_OP_ABS_32:
640 case CC_OP_NABS_32:
641 case CC_OP_LTGT0_32:
642 case CC_OP_LTGT0_64:
643 case CC_OP_COMP_32:
644 case CC_OP_COMP_64:
645 case CC_OP_NZ_F32:
646 case CC_OP_NZ_F64:
647 case CC_OP_FLOGR:
648 case CC_OP_LCBB:
649 /* 1 argument */
650 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
651 break;
652 case CC_OP_ICM:
653 case CC_OP_LTGT_32:
654 case CC_OP_LTGT_64:
655 case CC_OP_LTUGTU_32:
656 case CC_OP_LTUGTU_64:
657 case CC_OP_TM_32:
658 case CC_OP_TM_64:
659 case CC_OP_SLA_32:
660 case CC_OP_SLA_64:
661 case CC_OP_NZ_F128:
662 case CC_OP_VC:
663 /* 2 arguments */
664 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
665 break;
666 case CC_OP_ADD_64:
667 case CC_OP_ADDU_64:
668 case CC_OP_ADDC_64:
669 case CC_OP_SUB_64:
670 case CC_OP_SUBU_64:
671 case CC_OP_SUBB_64:
672 case CC_OP_ADD_32:
673 case CC_OP_ADDU_32:
674 case CC_OP_ADDC_32:
675 case CC_OP_SUB_32:
676 case CC_OP_SUBU_32:
677 case CC_OP_SUBB_32:
678 /* 3 arguments */
679 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
680 break;
681 case CC_OP_DYNAMIC:
682 /* unknown operation - assume 3 arguments and cc_op in env */
683 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
684 break;
685 default:
686 tcg_abort();
689 if (local_cc_op) {
690 tcg_temp_free_i32(local_cc_op);
692 if (dummy) {
693 tcg_temp_free_i64(dummy);
696 /* We now have cc in cc_op as constant */
697 set_cc_static(s);
700 static bool use_exit_tb(DisasContext *s)
702 return s->base.singlestep_enabled ||
703 (tb_cflags(s->base.tb) & CF_LAST_IO) ||
704 (s->base.tb->flags & FLAG_MASK_PER);
707 static bool use_goto_tb(DisasContext *s, uint64_t dest)
709 if (unlikely(use_exit_tb(s))) {
710 return false;
712 #ifndef CONFIG_USER_ONLY
713 return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
714 (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
715 #else
716 return true;
717 #endif
720 static void account_noninline_branch(DisasContext *s, int cc_op)
722 #ifdef DEBUG_INLINE_BRANCHES
723 inline_branch_miss[cc_op]++;
724 #endif
727 static void account_inline_branch(DisasContext *s, int cc_op)
729 #ifdef DEBUG_INLINE_BRANCHES
730 inline_branch_hit[cc_op]++;
731 #endif
734 /* Table of mask values to comparison codes, given a comparison as input.
735 For such, CC=3 should not be possible. */
736 static const TCGCond ltgt_cond[16] = {
737 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
738 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
739 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
740 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
741 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
742 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
743 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
744 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
747 /* Table of mask values to comparison codes, given a logic op as input.
748 For such, only CC=0 and CC=1 should be possible. */
749 static const TCGCond nz_cond[16] = {
750 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
751 TCG_COND_NEVER, TCG_COND_NEVER,
752 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
753 TCG_COND_NE, TCG_COND_NE,
754 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
755 TCG_COND_EQ, TCG_COND_EQ,
756 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
757 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
760 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
761 details required to generate a TCG comparison. */
762 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
764 TCGCond cond;
765 enum cc_op old_cc_op = s->cc_op;
767 if (mask == 15 || mask == 0) {
768 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
769 c->u.s32.a = cc_op;
770 c->u.s32.b = cc_op;
771 c->g1 = c->g2 = true;
772 c->is_64 = false;
773 return;
776 /* Find the TCG condition for the mask + cc op. */
777 switch (old_cc_op) {
778 case CC_OP_LTGT0_32:
779 case CC_OP_LTGT0_64:
780 case CC_OP_LTGT_32:
781 case CC_OP_LTGT_64:
782 cond = ltgt_cond[mask];
783 if (cond == TCG_COND_NEVER) {
784 goto do_dynamic;
786 account_inline_branch(s, old_cc_op);
787 break;
789 case CC_OP_LTUGTU_32:
790 case CC_OP_LTUGTU_64:
791 cond = tcg_unsigned_cond(ltgt_cond[mask]);
792 if (cond == TCG_COND_NEVER) {
793 goto do_dynamic;
795 account_inline_branch(s, old_cc_op);
796 break;
798 case CC_OP_NZ:
799 cond = nz_cond[mask];
800 if (cond == TCG_COND_NEVER) {
801 goto do_dynamic;
803 account_inline_branch(s, old_cc_op);
804 break;
806 case CC_OP_TM_32:
807 case CC_OP_TM_64:
808 switch (mask) {
809 case 8:
810 cond = TCG_COND_EQ;
811 break;
812 case 4 | 2 | 1:
813 cond = TCG_COND_NE;
814 break;
815 default:
816 goto do_dynamic;
818 account_inline_branch(s, old_cc_op);
819 break;
821 case CC_OP_ICM:
822 switch (mask) {
823 case 8:
824 cond = TCG_COND_EQ;
825 break;
826 case 4 | 2 | 1:
827 case 4 | 2:
828 cond = TCG_COND_NE;
829 break;
830 default:
831 goto do_dynamic;
833 account_inline_branch(s, old_cc_op);
834 break;
836 case CC_OP_FLOGR:
837 switch (mask & 0xa) {
838 case 8: /* src == 0 -> no one bit found */
839 cond = TCG_COND_EQ;
840 break;
841 case 2: /* src != 0 -> one bit found */
842 cond = TCG_COND_NE;
843 break;
844 default:
845 goto do_dynamic;
847 account_inline_branch(s, old_cc_op);
848 break;
850 case CC_OP_ADDU_32:
851 case CC_OP_ADDU_64:
852 switch (mask) {
853 case 8 | 2: /* vr == 0 */
854 cond = TCG_COND_EQ;
855 break;
856 case 4 | 1: /* vr != 0 */
857 cond = TCG_COND_NE;
858 break;
859 case 8 | 4: /* no carry -> vr >= src */
860 cond = TCG_COND_GEU;
861 break;
862 case 2 | 1: /* carry -> vr < src */
863 cond = TCG_COND_LTU;
864 break;
865 default:
866 goto do_dynamic;
868 account_inline_branch(s, old_cc_op);
869 break;
871 case CC_OP_SUBU_32:
872 case CC_OP_SUBU_64:
873 /* Note that CC=0 is impossible; treat it as dont-care. */
874 switch (mask & 7) {
875 case 2: /* zero -> op1 == op2 */
876 cond = TCG_COND_EQ;
877 break;
878 case 4 | 1: /* !zero -> op1 != op2 */
879 cond = TCG_COND_NE;
880 break;
881 case 4: /* borrow (!carry) -> op1 < op2 */
882 cond = TCG_COND_LTU;
883 break;
884 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
885 cond = TCG_COND_GEU;
886 break;
887 default:
888 goto do_dynamic;
890 account_inline_branch(s, old_cc_op);
891 break;
893 default:
894 do_dynamic:
895 /* Calculate cc value. */
896 gen_op_calc_cc(s);
897 /* FALLTHRU */
899 case CC_OP_STATIC:
900 /* Jump based on CC. We'll load up the real cond below;
901 the assignment here merely avoids a compiler warning. */
902 account_noninline_branch(s, old_cc_op);
903 old_cc_op = CC_OP_STATIC;
904 cond = TCG_COND_NEVER;
905 break;
908 /* Load up the arguments of the comparison. */
909 c->is_64 = true;
910 c->g1 = c->g2 = false;
911 switch (old_cc_op) {
912 case CC_OP_LTGT0_32:
913 c->is_64 = false;
914 c->u.s32.a = tcg_temp_new_i32();
915 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
916 c->u.s32.b = tcg_const_i32(0);
917 break;
918 case CC_OP_LTGT_32:
919 case CC_OP_LTUGTU_32:
920 case CC_OP_SUBU_32:
921 c->is_64 = false;
922 c->u.s32.a = tcg_temp_new_i32();
923 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
924 c->u.s32.b = tcg_temp_new_i32();
925 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
926 break;
928 case CC_OP_LTGT0_64:
929 case CC_OP_NZ:
930 case CC_OP_FLOGR:
931 c->u.s64.a = cc_dst;
932 c->u.s64.b = tcg_const_i64(0);
933 c->g1 = true;
934 break;
935 case CC_OP_LTGT_64:
936 case CC_OP_LTUGTU_64:
937 case CC_OP_SUBU_64:
938 c->u.s64.a = cc_src;
939 c->u.s64.b = cc_dst;
940 c->g1 = c->g2 = true;
941 break;
943 case CC_OP_TM_32:
944 case CC_OP_TM_64:
945 case CC_OP_ICM:
946 c->u.s64.a = tcg_temp_new_i64();
947 c->u.s64.b = tcg_const_i64(0);
948 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
949 break;
951 case CC_OP_ADDU_32:
952 c->is_64 = false;
953 c->u.s32.a = tcg_temp_new_i32();
954 c->u.s32.b = tcg_temp_new_i32();
955 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
956 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
957 tcg_gen_movi_i32(c->u.s32.b, 0);
958 } else {
959 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
961 break;
963 case CC_OP_ADDU_64:
964 c->u.s64.a = cc_vr;
965 c->g1 = true;
966 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
967 c->u.s64.b = tcg_const_i64(0);
968 } else {
969 c->u.s64.b = cc_src;
970 c->g2 = true;
972 break;
974 case CC_OP_STATIC:
975 c->is_64 = false;
976 c->u.s32.a = cc_op;
977 c->g1 = true;
978 switch (mask) {
979 case 0x8 | 0x4 | 0x2: /* cc != 3 */
980 cond = TCG_COND_NE;
981 c->u.s32.b = tcg_const_i32(3);
982 break;
983 case 0x8 | 0x4 | 0x1: /* cc != 2 */
984 cond = TCG_COND_NE;
985 c->u.s32.b = tcg_const_i32(2);
986 break;
987 case 0x8 | 0x2 | 0x1: /* cc != 1 */
988 cond = TCG_COND_NE;
989 c->u.s32.b = tcg_const_i32(1);
990 break;
991 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
992 cond = TCG_COND_EQ;
993 c->g1 = false;
994 c->u.s32.a = tcg_temp_new_i32();
995 c->u.s32.b = tcg_const_i32(0);
996 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
997 break;
998 case 0x8 | 0x4: /* cc < 2 */
999 cond = TCG_COND_LTU;
1000 c->u.s32.b = tcg_const_i32(2);
1001 break;
1002 case 0x8: /* cc == 0 */
1003 cond = TCG_COND_EQ;
1004 c->u.s32.b = tcg_const_i32(0);
1005 break;
1006 case 0x4 | 0x2 | 0x1: /* cc != 0 */
1007 cond = TCG_COND_NE;
1008 c->u.s32.b = tcg_const_i32(0);
1009 break;
1010 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
1011 cond = TCG_COND_NE;
1012 c->g1 = false;
1013 c->u.s32.a = tcg_temp_new_i32();
1014 c->u.s32.b = tcg_const_i32(0);
1015 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
1016 break;
1017 case 0x4: /* cc == 1 */
1018 cond = TCG_COND_EQ;
1019 c->u.s32.b = tcg_const_i32(1);
1020 break;
1021 case 0x2 | 0x1: /* cc > 1 */
1022 cond = TCG_COND_GTU;
1023 c->u.s32.b = tcg_const_i32(1);
1024 break;
1025 case 0x2: /* cc == 2 */
1026 cond = TCG_COND_EQ;
1027 c->u.s32.b = tcg_const_i32(2);
1028 break;
1029 case 0x1: /* cc == 3 */
1030 cond = TCG_COND_EQ;
1031 c->u.s32.b = tcg_const_i32(3);
1032 break;
1033 default:
1034 /* CC is masked by something else: (8 >> cc) & mask. */
1035 cond = TCG_COND_NE;
1036 c->g1 = false;
1037 c->u.s32.a = tcg_const_i32(8);
1038 c->u.s32.b = tcg_const_i32(0);
1039 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
1040 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
1041 break;
1043 break;
1045 default:
1046 abort();
1048 c->cond = cond;
1051 static void free_compare(DisasCompare *c)
1053 if (!c->g1) {
1054 if (c->is_64) {
1055 tcg_temp_free_i64(c->u.s64.a);
1056 } else {
1057 tcg_temp_free_i32(c->u.s32.a);
1060 if (!c->g2) {
1061 if (c->is_64) {
1062 tcg_temp_free_i64(c->u.s64.b);
1063 } else {
1064 tcg_temp_free_i32(c->u.s32.b);
1069 /* ====================================================================== */
1070 /* Define the insn format enumeration. */
1071 #define F0(N) FMT_##N,
1072 #define F1(N, X1) F0(N)
1073 #define F2(N, X1, X2) F0(N)
1074 #define F3(N, X1, X2, X3) F0(N)
1075 #define F4(N, X1, X2, X3, X4) F0(N)
1076 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1077 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
1079 typedef enum {
1080 #include "insn-format.def"
1081 } DisasFormat;
1083 #undef F0
1084 #undef F1
1085 #undef F2
1086 #undef F3
1087 #undef F4
1088 #undef F5
1089 #undef F6
1091 /* This is the way fields are to be accessed out of DisasFields. */
1092 #define have_field(S, F) have_field1((S), FLD_O_##F)
1093 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1095 static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
1097 return (s->fields.presentO >> c) & 1;
1100 static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
1101 enum DisasFieldIndexC c)
1103 assert(have_field1(s, o));
1104 return s->fields.c[c];
1107 /* Describe the layout of each field in each format. */
1108 typedef struct DisasField {
1109 unsigned int beg:8;
1110 unsigned int size:8;
1111 unsigned int type:2;
1112 unsigned int indexC:6;
1113 enum DisasFieldIndexO indexO:8;
1114 } DisasField;
1116 typedef struct DisasFormatInfo {
1117 DisasField op[NUM_C_FIELD];
1118 } DisasFormatInfo;
1120 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1121 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1122 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1123 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1124 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1125 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1126 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1127 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1128 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1129 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1130 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1131 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1132 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1133 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1134 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1136 #define F0(N) { { } },
1137 #define F1(N, X1) { { X1 } },
1138 #define F2(N, X1, X2) { { X1, X2 } },
1139 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1140 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1141 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1142 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1144 static const DisasFormatInfo format_info[] = {
1145 #include "insn-format.def"
1148 #undef F0
1149 #undef F1
1150 #undef F2
1151 #undef F3
1152 #undef F4
1153 #undef F5
1154 #undef F6
1155 #undef R
1156 #undef M
1157 #undef V
1158 #undef BD
1159 #undef BXD
1160 #undef BDL
1161 #undef BXDL
1162 #undef I
1163 #undef L
1165 /* Generally, we'll extract operands into this structures, operate upon
1166 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1167 of routines below for more details. */
1168 typedef struct {
1169 bool g_out, g_out2, g_in1, g_in2;
1170 TCGv_i64 out, out2, in1, in2;
1171 TCGv_i64 addr1;
1172 } DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
1216 struct DisasInsn {
1217 unsigned opc:16;
1218 unsigned flags:16;
1219 DisasFormat fmt:8;
1220 unsigned fac:8;
1221 unsigned spec:8;
1223 const char *name;
1225 /* Pre-process arguments before HELP_OP. */
1226 void (*help_in1)(DisasContext *, DisasOps *);
1227 void (*help_in2)(DisasContext *, DisasOps *);
1228 void (*help_prep)(DisasContext *, DisasOps *);
1231 * Post-process output after HELP_OP.
1232 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1234 void (*help_wout)(DisasContext *, DisasOps *);
1235 void (*help_cout)(DisasContext *, DisasOps *);
1237 /* Implement the operation itself. */
1238 DisasJumpType (*help_op)(DisasContext *, DisasOps *);
1240 uint64_t data;
1243 /* ====================================================================== */
1244 /* Miscellaneous helpers, used by several operations. */
/* Load the shift-amount operand into o->in2, masked to the valid bit
   range (MASK).  With no base register the displacement is a constant;
   otherwise compute the address and mask the result.  */
static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
/* Emit an unconditional branch to the known address DEST.  Uses goto_tb
   chaining when allowed, otherwise just stores the new PSW address.  */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the very next instruction: fall through.  */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
/*
 * Emit a conditional branch described by the comparison C.  If IS_IMM,
 * the target is PC-relative (IMM halfwords); otherwise it is the
 * register/computed value CDEST.  Always consumes (frees) C.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* 32-bit compare: widen the setcond result so a single
               64-bit movcond can select the new PSW address.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1410 /* ====================================================================== */
1411 /* The operations. These perform the bulk of the work for any insn,
1412 usually after the operands have been loaded and output initialized. */
/* Integer absolute value.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* FP absolute value: clear the sign bit of a 32-bit float.  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* FP absolute value: clear the sign bit of a 64-bit float.  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* FP absolute value of a 128-bit float: clear the sign bit in the
   high half, copy the low half unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* Integer addition.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* Add with carry: out = in1 + in2 + carry-from-CC.  */
static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
/* Add immediate to storage.  With the STFLE.45 facility the add is done
   atomically in memory; otherwise it is a plain load/add/store.  Either
   way the sum is (re)computed in o->out so CC can be set from it.  */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* BFP add, 32-bit.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 64-bit.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 128-bit; the helper returns the low half out of band.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
/* Bitwise AND.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* AND immediate against one sub-field of a register.  insn->data encodes
   the field: low byte = shift, high bits = field size in bits.  Bits
   outside the field are forced to 1 in the mask so they pass through.  */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
/* AND to storage.  Same structure as op_asi: atomic in-memory AND when
   the interlocked-access facility 2 is present, load/and/store otherwise;
   the result is recomputed in o->out for CC.  */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* Branch and save: store link information, then branch to the register
   target if one was supplied (in2 == NULL means no branch, e.g. r2=0).  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
/* Compute the BAL-style link information into o->out.  In 31/64-bit
   mode this is the plain link info; in 24-bit mode the upper byte is
   assembled from the ILC, the PSW program mask and the CC.  */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    /* Keep the high 32 bits, insert ILC (ilen/2) and the return address.  */
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask from the PSW into bits 24..27.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code into bits 28..29.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
/* Branch and link: save link info (24-bit aware), then branch to the
   register target if one was supplied.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/* Branch relative and save: save link info, branch PC-relative.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}
/* Branch on condition (BC/BCR and relative forms).  The BCR r2=0 cases
   double as serialization points rather than branches.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on count, 32-bit: decrement the low half of r1 and branch
   while the 32-bit result is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on count using the HIGH half of r1 (high-word facility).  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

/* Branch on count, 64-bit: decrement r1 in place and branch while
   non-zero.  g1 is set since regs[r1] is a global, not a temp.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on index, 32-bit (BXH/BXLE): r1 += r3, compare the 32-bit sum
   against r3|1.  insn->data selects LE (branch-low-or-equal) vs GT.  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on index, 64-bit.  When r1 == r3|1 the comparand must be
   snapshotted before the add clobbers it.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Compare and branch (CRJ/CGRJ/CIJ/... family): compare in1 with in2
   using the m3 condition (unsigned when insn->data is set) and branch
   to an immediate offset or to the b4/d4 address.  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* BFP compare, 32-bit; the helper produces the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 64-bit.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 128-bit.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/*
 * Extract and validate the m3 (rounding mode) and m4 fields of an FP
 * instruction, packed as deposit32(m3, 4, 4, m4).  Fields that only
 * exist with the floating-point-extension facility are zeroed when the
 * facility is absent.  Returns NULL (after raising a specification
 * exception) for invalid rounding modes.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
/*
 * BFP <-> integer conversion family.  Each function extracts/validates
 * the m3/m4 modifier via fpinst_extract_m34 (NULL => specification
 * exception already raised), calls the matching helper, and — for the
 * float-to-int direction — sets CC from the source value.
 * Naming: c{f,g}{e,d,x}b = float->(32,64)-bit signed int from
 * (32,64,128)-bit BFP; cl* = unsigned; c{e,d,x}{g,lg}b = int->float.
 */

/* 32-bit BFP -> 32-bit signed int.  */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* 64-bit BFP -> 32-bit signed int.  */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* 128-bit BFP -> 32-bit signed int.  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 32-bit BFP -> 64-bit signed int.  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* 64-bit BFP -> 64-bit signed int.  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* 128-bit BFP -> 64-bit signed int.  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 32-bit BFP -> 32-bit unsigned int.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* 64-bit BFP -> 32-bit unsigned int.  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* 128-bit BFP -> 32-bit unsigned int.  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 32-bit BFP -> 64-bit unsigned int.  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* 64-bit BFP -> 64-bit unsigned int.  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* 128-bit BFP -> 64-bit unsigned int.  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 64-bit signed int -> 32-bit BFP.  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* 64-bit signed int -> 64-bit BFP.  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* 64-bit signed int -> 128-bit BFP.  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* 64-bit unsigned int -> 32-bit BFP.  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* 64-bit unsigned int -> 64-bit BFP.  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* 64-bit unsigned int -> 128-bit BFP.  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
/* CHECKSUM: helper computes the checksum and the number of bytes
   consumed; advance the r2 address/length pair accordingly.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
/* COMPARE LOGICAL (character): power-of-two lengths are inlined as a
   pair of loads plus an unsigned compare; anything else goes through
   the byte-loop helper.  Note l is the length minus one.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
/* COMPARE LOGICAL LONG: both register pairs must be even-aligned, else
   a specification exception is raised.  All work is in the helper.  */
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL LONG EXTENDED: same even-register restriction.  */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL LONG UNICODE: same even-register restriction.  */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the m3-selected
   bytes of in1 against storage at in2 and yields the CC.  */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL STRING: helper returns the updated first address,
   with the second address in the low-128 return slot.  */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
/* COPY SIGN: out = magnitude of in2 with the sign bit of in1.  */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
/* COMPARE AND SWAP: atomic cmpxchg on storage; CC = 1 iff the memory
   value did not match the expected value.  */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
/* COMPARE DOUBLE AND SWAP (128-bit): serial helper when not running
   parallel, cmpxchg16 helper when the host supports it, otherwise bail
   to the exclusive-execution slow path.  */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
/* COMPARE AND SWAP AND STORE: dispatch to the parallel or serial
   helper depending on the TB's execution mode.  */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): cmpxchg on storage, and if
   the compare succeeded and bit 63 of R2 is set, purge the TLB.  */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* The operand address is aligned down to the access size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal? */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
#endif
/* CONVERT TO DECIMAL: helper converts the low 32 bits of in1 to a
   packed-decimal doubleword, stored at the in2 address.  */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
/* COMPARE AND TRAP: branch around the trap when the INVERTED m3
   condition holds; insn->data selects the unsigned comparison.  */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
/* CONVERT UTF-x TO UTF-y family (CU12/CU14/CU21/CU24/CU41/CU42):
   insn->data picks the direction; r1/r2 must be even pairs; m3 is the
   well-formedness-check flag, honored only with the ETF3 enhancement.  */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even. */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): forward r1, r3 and the function code to the
   helper, which implements the supported hypercalls.  */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif
/* Signed 32-bit divide; quotient/remainder pair via return_low128.  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* Unsigned 32-bit divide.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* Signed 64-bit divide.  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* Unsigned 128/64-bit divide: dividend is the out:out2 pair.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* BFP divide, 32-bit.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP divide, 64-bit.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP divide, 128-bit.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
/* EXTRACT ACCESS REGISTER: read access register r2 into out.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

/* EXTRACT CACHE ATTRIBUTE: we model no cache topology, return -1.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* EXTRACT FPC: read the floating-point control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
/* EXTRACT PSW: high half of the PSW mask into r1, low half into r2
   (when r2 != 0).  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2494 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2496 int r1 = get_field(s, r1);
2497 TCGv_i32 ilen;
2498 TCGv_i64 v1;
2500 /* Nested EXECUTE is not allowed. */
2501 if (unlikely(s->ex_value)) {
2502 gen_program_exception(s, PGM_EXECUTE);
2503 return DISAS_NORETURN;
2506 update_psw_addr(s);
2507 update_cc_op(s);
2509 if (r1 == 0) {
2510 v1 = tcg_const_i64(0);
2511 } else {
2512 v1 = regs[r1];
2515 ilen = tcg_const_i32(s->ilen);
2516 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2517 tcg_temp_free_i32(ilen);
2519 if (r1 == 0) {
2520 tcg_temp_free_i64(v1);
2523 return DISAS_PC_CC_UPDATED;
2526 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2528 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2530 if (!m34) {
2531 return DISAS_NORETURN;
2533 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2534 tcg_temp_free_i32(m34);
2535 return DISAS_NEXT;
2538 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2540 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2542 if (!m34) {
2543 return DISAS_NORETURN;
2545 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2546 tcg_temp_free_i32(m34);
2547 return DISAS_NEXT;
2550 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2552 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2554 if (!m34) {
2555 return DISAS_NORETURN;
2557 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2558 return_low128(o->out2);
2559 tcg_temp_free_i32(m34);
2560 return DISAS_NEXT;
2563 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2565 /* We'll use the original input for cc computation, since we get to
2566 compare that against 0, which ought to be better than comparing
2567 the real output against 64. It also lets cc_dst be a convenient
2568 temporary during our computation. */
2569 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2571 /* R1 = IN ? CLZ(IN) : 64. */
2572 tcg_gen_clzi_i64(o->out, o->in2, 64);
2574 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2575 value by 64, which is undefined. But since the shift is 64 iff the
2576 input is zero, we still get the correct result after and'ing. */
2577 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2578 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2579 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2580 return DISAS_NEXT;
2583 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2585 int m3 = get_field(s, m3);
2586 int pos, len, base = s->insn->data;
2587 TCGv_i64 tmp = tcg_temp_new_i64();
2588 uint64_t ccm;
2590 switch (m3) {
2591 case 0xf:
2592 /* Effectively a 32-bit load. */
2593 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2594 len = 32;
2595 goto one_insert;
2597 case 0xc:
2598 case 0x6:
2599 case 0x3:
2600 /* Effectively a 16-bit load. */
2601 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2602 len = 16;
2603 goto one_insert;
2605 case 0x8:
2606 case 0x4:
2607 case 0x2:
2608 case 0x1:
2609 /* Effectively an 8-bit load. */
2610 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2611 len = 8;
2612 goto one_insert;
2614 one_insert:
2615 pos = base + ctz32(m3) * 8;
2616 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2617 ccm = ((1ull << len) - 1) << pos;
2618 break;
2620 default:
2621 /* This is going to be a sequence of loads and inserts. */
2622 pos = base + 32 - 8;
2623 ccm = 0;
2624 while (m3) {
2625 if (m3 & 0x8) {
2626 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2627 tcg_gen_addi_i64(o->in2, o->in2, 1);
2628 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2629 ccm |= 0xff << pos;
2631 m3 = (m3 << 1) & 0xf;
2632 pos -= 8;
2634 break;
2637 tcg_gen_movi_i64(tmp, ccm);
2638 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2639 tcg_temp_free_i64(tmp);
2640 return DISAS_NEXT;
2643 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2645 int shift = s->insn->data & 0xff;
2646 int size = s->insn->data >> 8;
2647 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2648 return DISAS_NEXT;
2651 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2653 TCGv_i64 t1, t2;
2655 gen_op_calc_cc(s);
2656 t1 = tcg_temp_new_i64();
2657 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2658 t2 = tcg_temp_new_i64();
2659 tcg_gen_extu_i32_i64(t2, cc_op);
2660 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2661 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2662 tcg_temp_free_i64(t1);
2663 tcg_temp_free_i64(t2);
2664 return DISAS_NEXT;
2667 #ifndef CONFIG_USER_ONLY
2668 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2670 TCGv_i32 m4;
2672 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2673 m4 = tcg_const_i32(get_field(s, m4));
2674 } else {
2675 m4 = tcg_const_i32(0);
2677 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2678 tcg_temp_free_i32(m4);
2679 return DISAS_NEXT;
2682 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2684 TCGv_i32 m4;
2686 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2687 m4 = tcg_const_i32(get_field(s, m4));
2688 } else {
2689 m4 = tcg_const_i32(0);
2691 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2692 tcg_temp_free_i32(m4);
2693 return DISAS_NEXT;
2696 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2698 gen_helper_iske(o->out, cpu_env, o->in2);
2699 return DISAS_NEXT;
2701 #endif
2703 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2705 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2706 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2707 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2708 TCGv_i32 t_r1, t_r2, t_r3, type;
2710 switch (s->insn->data) {
2711 case S390_FEAT_TYPE_KMCTR:
2712 if (r3 & 1 || !r3) {
2713 gen_program_exception(s, PGM_SPECIFICATION);
2714 return DISAS_NORETURN;
2716 /* FALL THROUGH */
2717 case S390_FEAT_TYPE_PPNO:
2718 case S390_FEAT_TYPE_KMF:
2719 case S390_FEAT_TYPE_KMC:
2720 case S390_FEAT_TYPE_KMO:
2721 case S390_FEAT_TYPE_KM:
2722 if (r1 & 1 || !r1) {
2723 gen_program_exception(s, PGM_SPECIFICATION);
2724 return DISAS_NORETURN;
2726 /* FALL THROUGH */
2727 case S390_FEAT_TYPE_KMAC:
2728 case S390_FEAT_TYPE_KIMD:
2729 case S390_FEAT_TYPE_KLMD:
2730 if (r2 & 1 || !r2) {
2731 gen_program_exception(s, PGM_SPECIFICATION);
2732 return DISAS_NORETURN;
2734 /* FALL THROUGH */
2735 case S390_FEAT_TYPE_PCKMO:
2736 case S390_FEAT_TYPE_PCC:
2737 break;
2738 default:
2739 g_assert_not_reached();
2742 t_r1 = tcg_const_i32(r1);
2743 t_r2 = tcg_const_i32(r2);
2744 t_r3 = tcg_const_i32(r3);
2745 type = tcg_const_i32(s->insn->data);
2746 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2747 set_cc_static(s);
2748 tcg_temp_free_i32(t_r1);
2749 tcg_temp_free_i32(t_r2);
2750 tcg_temp_free_i32(t_r3);
2751 tcg_temp_free_i32(type);
2752 return DISAS_NEXT;
2755 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2757 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2758 set_cc_static(s);
2759 return DISAS_NEXT;
2762 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2764 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2765 set_cc_static(s);
2766 return DISAS_NEXT;
2769 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2771 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2772 set_cc_static(s);
2773 return DISAS_NEXT;
2776 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2778 /* The real output is indeed the original value in memory;
2779 recompute the addition for the computation of CC. */
2780 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2781 s->insn->data | MO_ALIGN);
2782 /* However, we need to recompute the addition for setting CC. */
2783 tcg_gen_add_i64(o->out, o->in1, o->in2);
2784 return DISAS_NEXT;
2787 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2789 /* The real output is indeed the original value in memory;
2790 recompute the addition for the computation of CC. */
2791 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2792 s->insn->data | MO_ALIGN);
2793 /* However, we need to recompute the operation for setting CC. */
2794 tcg_gen_and_i64(o->out, o->in1, o->in2);
2795 return DISAS_NEXT;
2798 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2800 /* The real output is indeed the original value in memory;
2801 recompute the addition for the computation of CC. */
2802 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2803 s->insn->data | MO_ALIGN);
2804 /* However, we need to recompute the operation for setting CC. */
2805 tcg_gen_or_i64(o->out, o->in1, o->in2);
2806 return DISAS_NEXT;
2809 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2811 /* The real output is indeed the original value in memory;
2812 recompute the addition for the computation of CC. */
2813 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2814 s->insn->data | MO_ALIGN);
2815 /* However, we need to recompute the operation for setting CC. */
2816 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2817 return DISAS_NEXT;
2820 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2822 gen_helper_ldeb(o->out, cpu_env, o->in2);
2823 return DISAS_NEXT;
2826 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2828 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2830 if (!m34) {
2831 return DISAS_NORETURN;
2833 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2834 tcg_temp_free_i32(m34);
2835 return DISAS_NEXT;
2838 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2840 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2842 if (!m34) {
2843 return DISAS_NORETURN;
2845 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2846 tcg_temp_free_i32(m34);
2847 return DISAS_NEXT;
2850 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2852 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2854 if (!m34) {
2855 return DISAS_NORETURN;
2857 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2858 tcg_temp_free_i32(m34);
2859 return DISAS_NEXT;
2862 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2864 gen_helper_lxdb(o->out, cpu_env, o->in2);
2865 return_low128(o->out2);
2866 return DISAS_NEXT;
2869 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2871 gen_helper_lxeb(o->out, cpu_env, o->in2);
2872 return_low128(o->out2);
2873 return DISAS_NEXT;
2876 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2878 tcg_gen_shli_i64(o->out, o->in2, 32);
2879 return DISAS_NEXT;
2882 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2884 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2885 return DISAS_NEXT;
2888 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2890 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2891 return DISAS_NEXT;
2894 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2896 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2897 return DISAS_NEXT;
2900 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2902 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2903 return DISAS_NEXT;
2906 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2908 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2909 return DISAS_NEXT;
2912 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2914 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2915 return DISAS_NEXT;
2918 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2920 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2921 return DISAS_NEXT;
2924 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2926 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2927 return DISAS_NEXT;
2930 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2932 TCGLabel *lab = gen_new_label();
2933 store_reg32_i64(get_field(s, r1), o->in2);
2934 /* The value is stored even in case of trap. */
2935 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2936 gen_trap(s);
2937 gen_set_label(lab);
2938 return DISAS_NEXT;
2941 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2943 TCGLabel *lab = gen_new_label();
2944 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2945 /* The value is stored even in case of trap. */
2946 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2947 gen_trap(s);
2948 gen_set_label(lab);
2949 return DISAS_NEXT;
2952 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2954 TCGLabel *lab = gen_new_label();
2955 store_reg32h_i64(get_field(s, r1), o->in2);
2956 /* The value is stored even in case of trap. */
2957 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2958 gen_trap(s);
2959 gen_set_label(lab);
2960 return DISAS_NEXT;
2963 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2965 TCGLabel *lab = gen_new_label();
2966 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2967 /* The value is stored even in case of trap. */
2968 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2969 gen_trap(s);
2970 gen_set_label(lab);
2971 return DISAS_NEXT;
2974 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2976 TCGLabel *lab = gen_new_label();
2977 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2978 /* The value is stored even in case of trap. */
2979 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2980 gen_trap(s);
2981 gen_set_label(lab);
2982 return DISAS_NEXT;
2985 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2987 DisasCompare c;
2989 disas_jcc(s, &c, get_field(s, m3));
2991 if (c.is_64) {
2992 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2993 o->in2, o->in1);
2994 free_compare(&c);
2995 } else {
2996 TCGv_i32 t32 = tcg_temp_new_i32();
2997 TCGv_i64 t, z;
2999 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3000 free_compare(&c);
3002 t = tcg_temp_new_i64();
3003 tcg_gen_extu_i32_i64(t, t32);
3004 tcg_temp_free_i32(t32);
3006 z = tcg_const_i64(0);
3007 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3008 tcg_temp_free_i64(t);
3009 tcg_temp_free_i64(z);
3012 return DISAS_NEXT;
3015 #ifndef CONFIG_USER_ONLY
3016 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3018 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3019 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3020 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3021 tcg_temp_free_i32(r1);
3022 tcg_temp_free_i32(r3);
3023 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3024 return DISAS_PC_STALE_NOCHAIN;
3027 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3029 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3030 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3031 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3032 tcg_temp_free_i32(r1);
3033 tcg_temp_free_i32(r3);
3034 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3035 return DISAS_PC_STALE_NOCHAIN;
3038 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3040 gen_helper_lra(o->out, cpu_env, o->in2);
3041 set_cc_static(s);
3042 return DISAS_NEXT;
3045 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3047 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3048 return DISAS_NEXT;
3051 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3053 TCGv_i64 t1, t2;
3055 per_breaking_event(s);
3057 t1 = tcg_temp_new_i64();
3058 t2 = tcg_temp_new_i64();
3059 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3060 MO_TEUL | MO_ALIGN_8);
3061 tcg_gen_addi_i64(o->in2, o->in2, 4);
3062 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3063 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3064 tcg_gen_shli_i64(t1, t1, 32);
3065 gen_helper_load_psw(cpu_env, t1, t2);
3066 tcg_temp_free_i64(t1);
3067 tcg_temp_free_i64(t2);
3068 return DISAS_NORETURN;
3071 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3073 TCGv_i64 t1, t2;
3075 per_breaking_event(s);
3077 t1 = tcg_temp_new_i64();
3078 t2 = tcg_temp_new_i64();
3079 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3080 MO_TEQ | MO_ALIGN_8);
3081 tcg_gen_addi_i64(o->in2, o->in2, 8);
3082 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3083 gen_helper_load_psw(cpu_env, t1, t2);
3084 tcg_temp_free_i64(t1);
3085 tcg_temp_free_i64(t2);
3086 return DISAS_NORETURN;
3088 #endif
3090 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3092 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3093 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3094 gen_helper_lam(cpu_env, r1, o->in2, r3);
3095 tcg_temp_free_i32(r1);
3096 tcg_temp_free_i32(r3);
3097 return DISAS_NEXT;
3100 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3102 int r1 = get_field(s, r1);
3103 int r3 = get_field(s, r3);
3104 TCGv_i64 t1, t2;
3106 /* Only one register to read. */
3107 t1 = tcg_temp_new_i64();
3108 if (unlikely(r1 == r3)) {
3109 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3110 store_reg32_i64(r1, t1);
3111 tcg_temp_free(t1);
3112 return DISAS_NEXT;
3115 /* First load the values of the first and last registers to trigger
3116 possible page faults. */
3117 t2 = tcg_temp_new_i64();
3118 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3119 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3120 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3121 store_reg32_i64(r1, t1);
3122 store_reg32_i64(r3, t2);
3124 /* Only two registers to read. */
3125 if (((r1 + 1) & 15) == r3) {
3126 tcg_temp_free(t2);
3127 tcg_temp_free(t1);
3128 return DISAS_NEXT;
3131 /* Then load the remaining registers. Page fault can't occur. */
3132 r3 = (r3 - 1) & 15;
3133 tcg_gen_movi_i64(t2, 4);
3134 while (r1 != r3) {
3135 r1 = (r1 + 1) & 15;
3136 tcg_gen_add_i64(o->in2, o->in2, t2);
3137 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3138 store_reg32_i64(r1, t1);
3140 tcg_temp_free(t2);
3141 tcg_temp_free(t1);
3143 return DISAS_NEXT;
3146 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3148 int r1 = get_field(s, r1);
3149 int r3 = get_field(s, r3);
3150 TCGv_i64 t1, t2;
3152 /* Only one register to read. */
3153 t1 = tcg_temp_new_i64();
3154 if (unlikely(r1 == r3)) {
3155 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3156 store_reg32h_i64(r1, t1);
3157 tcg_temp_free(t1);
3158 return DISAS_NEXT;
3161 /* First load the values of the first and last registers to trigger
3162 possible page faults. */
3163 t2 = tcg_temp_new_i64();
3164 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3165 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3166 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3167 store_reg32h_i64(r1, t1);
3168 store_reg32h_i64(r3, t2);
3170 /* Only two registers to read. */
3171 if (((r1 + 1) & 15) == r3) {
3172 tcg_temp_free(t2);
3173 tcg_temp_free(t1);
3174 return DISAS_NEXT;
3177 /* Then load the remaining registers. Page fault can't occur. */
3178 r3 = (r3 - 1) & 15;
3179 tcg_gen_movi_i64(t2, 4);
3180 while (r1 != r3) {
3181 r1 = (r1 + 1) & 15;
3182 tcg_gen_add_i64(o->in2, o->in2, t2);
3183 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3184 store_reg32h_i64(r1, t1);
3186 tcg_temp_free(t2);
3187 tcg_temp_free(t1);
3189 return DISAS_NEXT;
3192 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3194 int r1 = get_field(s, r1);
3195 int r3 = get_field(s, r3);
3196 TCGv_i64 t1, t2;
3198 /* Only one register to read. */
3199 if (unlikely(r1 == r3)) {
3200 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3201 return DISAS_NEXT;
3204 /* First load the values of the first and last registers to trigger
3205 possible page faults. */
3206 t1 = tcg_temp_new_i64();
3207 t2 = tcg_temp_new_i64();
3208 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3209 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3210 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3211 tcg_gen_mov_i64(regs[r1], t1);
3212 tcg_temp_free(t2);
3214 /* Only two registers to read. */
3215 if (((r1 + 1) & 15) == r3) {
3216 tcg_temp_free(t1);
3217 return DISAS_NEXT;
3220 /* Then load the remaining registers. Page fault can't occur. */
3221 r3 = (r3 - 1) & 15;
3222 tcg_gen_movi_i64(t1, 8);
3223 while (r1 != r3) {
3224 r1 = (r1 + 1) & 15;
3225 tcg_gen_add_i64(o->in2, o->in2, t1);
3226 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3228 tcg_temp_free(t1);
3230 return DISAS_NEXT;
3233 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3235 TCGv_i64 a1, a2;
3236 MemOp mop = s->insn->data;
3238 /* In a parallel context, stop the world and single step. */
3239 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3240 update_psw_addr(s);
3241 update_cc_op(s);
3242 gen_exception(EXCP_ATOMIC);
3243 return DISAS_NORETURN;
3246 /* In a serial context, perform the two loads ... */
3247 a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3248 a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3249 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3250 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3251 tcg_temp_free_i64(a1);
3252 tcg_temp_free_i64(a2);
3254 /* ... and indicate that we performed them while interlocked. */
3255 gen_op_movi_cc(s, 0);
3256 return DISAS_NEXT;
3259 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3261 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3262 gen_helper_lpq(o->out, cpu_env, o->in2);
3263 } else if (HAVE_ATOMIC128) {
3264 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3265 } else {
3266 gen_helper_exit_atomic(cpu_env);
3267 return DISAS_NORETURN;
3269 return_low128(o->out2);
3270 return DISAS_NEXT;
3273 #ifndef CONFIG_USER_ONLY
3274 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3276 o->addr1 = get_address(s, 0, get_field(s, r2), 0);
3277 tcg_gen_qemu_ld_tl(o->out, o->addr1, MMU_REAL_IDX, s->insn->data);
3278 return DISAS_NEXT;
3280 #endif
3282 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3284 tcg_gen_andi_i64(o->out, o->in2, -256);
3285 return DISAS_NEXT;
3288 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3290 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3292 if (get_field(s, m3) > 6) {
3293 gen_program_exception(s, PGM_SPECIFICATION);
3294 return DISAS_NORETURN;
3297 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3298 tcg_gen_neg_i64(o->addr1, o->addr1);
3299 tcg_gen_movi_i64(o->out, 16);
3300 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3301 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3302 return DISAS_NEXT;
3305 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3307 o->out = o->in2;
3308 o->g_out = o->g_in2;
3309 o->in2 = NULL;
3310 o->g_in2 = false;
3311 return DISAS_NEXT;
3314 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3316 int b2 = get_field(s, b2);
3317 TCGv ar1 = tcg_temp_new_i64();
3319 o->out = o->in2;
3320 o->g_out = o->g_in2;
3321 o->in2 = NULL;
3322 o->g_in2 = false;
3324 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3325 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3326 tcg_gen_movi_i64(ar1, 0);
3327 break;
3328 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3329 tcg_gen_movi_i64(ar1, 1);
3330 break;
3331 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3332 if (b2) {
3333 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3334 } else {
3335 tcg_gen_movi_i64(ar1, 0);
3337 break;
3338 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3339 tcg_gen_movi_i64(ar1, 2);
3340 break;
3343 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3344 tcg_temp_free_i64(ar1);
3346 return DISAS_NEXT;
3349 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3351 o->out = o->in1;
3352 o->out2 = o->in2;
3353 o->g_out = o->g_in1;
3354 o->g_out2 = o->g_in2;
3355 o->in1 = NULL;
3356 o->in2 = NULL;
3357 o->g_in1 = o->g_in2 = false;
3358 return DISAS_NEXT;
3361 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3363 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3364 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3365 tcg_temp_free_i32(l);
3366 return DISAS_NEXT;
3369 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3371 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3372 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3373 tcg_temp_free_i32(l);
3374 return DISAS_NEXT;
3377 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3379 int r1 = get_field(s, r1);
3380 int r2 = get_field(s, r2);
3381 TCGv_i32 t1, t2;
3383 /* r1 and r2 must be even. */
3384 if (r1 & 1 || r2 & 1) {
3385 gen_program_exception(s, PGM_SPECIFICATION);
3386 return DISAS_NORETURN;
3389 t1 = tcg_const_i32(r1);
3390 t2 = tcg_const_i32(r2);
3391 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3392 tcg_temp_free_i32(t1);
3393 tcg_temp_free_i32(t2);
3394 set_cc_static(s);
3395 return DISAS_NEXT;
3398 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3400 int r1 = get_field(s, r1);
3401 int r3 = get_field(s, r3);
3402 TCGv_i32 t1, t3;
3404 /* r1 and r3 must be even. */
3405 if (r1 & 1 || r3 & 1) {
3406 gen_program_exception(s, PGM_SPECIFICATION);
3407 return DISAS_NORETURN;
3410 t1 = tcg_const_i32(r1);
3411 t3 = tcg_const_i32(r3);
3412 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3413 tcg_temp_free_i32(t1);
3414 tcg_temp_free_i32(t3);
3415 set_cc_static(s);
3416 return DISAS_NEXT;
3419 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3421 int r1 = get_field(s, r1);
3422 int r3 = get_field(s, r3);
3423 TCGv_i32 t1, t3;
3425 /* r1 and r3 must be even. */
3426 if (r1 & 1 || r3 & 1) {
3427 gen_program_exception(s, PGM_SPECIFICATION);
3428 return DISAS_NORETURN;
3431 t1 = tcg_const_i32(r1);
3432 t3 = tcg_const_i32(r3);
3433 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3434 tcg_temp_free_i32(t1);
3435 tcg_temp_free_i32(t3);
3436 set_cc_static(s);
3437 return DISAS_NEXT;
3440 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3442 int r3 = get_field(s, r3);
3443 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3444 set_cc_static(s);
3445 return DISAS_NEXT;
3448 #ifndef CONFIG_USER_ONLY
3449 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3451 int r1 = get_field(s, l1);
3452 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3453 set_cc_static(s);
3454 return DISAS_NEXT;
3457 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3459 int r1 = get_field(s, l1);
3460 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3461 set_cc_static(s);
3462 return DISAS_NEXT;
3464 #endif
3466 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3468 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3469 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3470 tcg_temp_free_i32(l);
3471 return DISAS_NEXT;
3474 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3476 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3477 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3478 tcg_temp_free_i32(l);
3479 return DISAS_NEXT;
3482 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3484 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3485 set_cc_static(s);
3486 return DISAS_NEXT;
3489 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3491 TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3492 TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3494 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3495 tcg_temp_free_i32(t1);
3496 tcg_temp_free_i32(t2);
3497 set_cc_static(s);
3498 return DISAS_NEXT;
3501 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3503 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3504 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3505 tcg_temp_free_i32(l);
3506 return DISAS_NEXT;
3509 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3511 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3512 return DISAS_NEXT;
3515 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3517 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3518 return DISAS_NEXT;
3521 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3523 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3524 return DISAS_NEXT;
3527 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3529 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3530 return DISAS_NEXT;
3533 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3535 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3536 return DISAS_NEXT;
3539 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3541 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3542 return_low128(o->out2);
3543 return DISAS_NEXT;
3546 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3548 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3549 return_low128(o->out2);
3550 return DISAS_NEXT;
3553 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3555 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3556 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3557 tcg_temp_free_i64(r3);
3558 return DISAS_NEXT;
3561 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3563 TCGv_i64 r3 = load_freg(get_field(s, r3));
3564 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3565 tcg_temp_free_i64(r3);
3566 return DISAS_NEXT;
3569 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3571 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3572 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3573 tcg_temp_free_i64(r3);
3574 return DISAS_NEXT;
3577 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3579 TCGv_i64 r3 = load_freg(get_field(s, r3));
3580 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3581 tcg_temp_free_i64(r3);
3582 return DISAS_NEXT;
3585 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3587 TCGv_i64 z, n;
3588 z = tcg_const_i64(0);
3589 n = tcg_temp_new_i64();
3590 tcg_gen_neg_i64(n, o->in2);
3591 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3592 tcg_temp_free_i64(n);
3593 tcg_temp_free_i64(z);
3594 return DISAS_NEXT;
3597 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3599 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3600 return DISAS_NEXT;
3603 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3605 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3606 return DISAS_NEXT;
3609 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3611 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3612 tcg_gen_mov_i64(o->out2, o->in2);
3613 return DISAS_NEXT;
3616 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3618 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3619 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3620 tcg_temp_free_i32(l);
3621 set_cc_static(s);
3622 return DISAS_NEXT;
3625 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3627 tcg_gen_neg_i64(o->out, o->in2);
3628 return DISAS_NEXT;
3631 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3633 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3634 return DISAS_NEXT;
3637 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3639 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3640 return DISAS_NEXT;
3643 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3645 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3646 tcg_gen_mov_i64(o->out2, o->in2);
3647 return DISAS_NEXT;
3650 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3652 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3653 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3654 tcg_temp_free_i32(l);
3655 set_cc_static(s);
3656 return DISAS_NEXT;
3659 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3661 tcg_gen_or_i64(o->out, o->in1, o->in2);
3662 return DISAS_NEXT;
3665 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3667 int shift = s->insn->data & 0xff;
3668 int size = s->insn->data >> 8;
3669 uint64_t mask = ((1ull << size) - 1) << shift;
3671 assert(!o->g_in2);
3672 tcg_gen_shli_i64(o->in2, o->in2, shift);
3673 tcg_gen_or_i64(o->out, o->in1, o->in2);
3675 /* Produce the CC from only the bits manipulated. */
3676 tcg_gen_andi_i64(cc_dst, o->out, mask);
3677 set_cc_nz_u64(s, cc_dst);
3678 return DISAS_NEXT;
/* OR immediate in storage.  With the interlocked-access facility the
   OR is performed atomically in memory; otherwise load/modify/store.
   insn->data carries the MemOp for the access. */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* PACK: convert the storage operands via helper; length from l1. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* PACK ASCII: second-operand length is l2 + 1, capped at 32 bytes;
   longer lengths raise a specification exception. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* PACK UNICODE: like PKA, but the length must additionally be even
   (2-byte source units) and no more than 64 bytes. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* POPULATION COUNT, computed in a helper. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
/* Purge the TLB (system emulation only). */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
#endif
/* ROTATE THEN INSERT SELECTED BITS (risbg/risbgn/risbhg/risblg,
   distinguished by op2): rotate in2 left by I5 and insert the bit
   range I3..I4 into out.  Bit 0x80 of I4 requests zeroing of the
   bits not inserted.  Fast paths use extract/deposit where the
   masks allow; otherwise fall back to and/or with constants. */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields.op2 == 0x5d) {
        /* risbhg operates on the high word; shift position up. */
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (op2 0x54/0x56/0x57):
   rotate in2 left by I5, combine the selected bit range I3..I4 into
   out, and set CC from the affected bits.  Bit 0x80 of I3 selects
   the test-only form, which discards the register result. */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        /* Set the unselected bits so they don't affect the AND. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
/* Byte-swap the low 16 bits of the second operand. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* Byte-swap the low 32 bits of the second operand. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* Byte-swap the full 64-bit second operand. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
/* 32-bit rotate left: rotate the low 32 bits of in1 by in2 and
   zero-extend the result into the 64-bit output. */
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}

/* 64-bit rotate left. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED, via helper; CC from the helper. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS SPACE CONTROL (FAST), via helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
#endif
/* SET ADDRESSING MODE.  insn->data selects the mode (0 = 24-bit,
   1 = 31-bit, otherwise 64-bit); the mode bits are deposited into
   the PSW mask and the TB is ended since execution mode changed. */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;        /* 24-bit address space */
        break;
    case 1:
        mask = 0x7fffffff;      /* 31-bit address space */
        break;
    default:
        mask = -1;              /* 64-bit: no restriction */
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
/* SET ACCESS REGISTER: store the low 32 bits of in2 into AR r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
/* 32-bit BFP subtract, via helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 64-bit BFP subtract, via helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 128-bit BFP subtract, via helper; low half returned via out2. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* 32-bit BFP square root, via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* 64-bit BFP square root, via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* 128-bit BFP square root, via helper; low half returned via out2. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP), via helper; CC from the helper. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIGNAL PROCESSOR, via helper; CC from the helper. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
#endif
/* STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data).
   Emit a branch around the store when the M3 condition does NOT hold. */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of r1. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
/* SHIFT LEFT SINGLE (arithmetic).  insn->data is the sign-bit index
   (31 for the 32-bit form, 63 for the 64-bit form), which also
   selects the CC computation. */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
/* Logical shift left. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Arithmetic shift right. */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Logical shift right. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* SET FPC, via helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL, via helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (2-bit form). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored.  Bit 29 is set to zero.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (3-bit form). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET DFP ROUNDING MODE: deposit the mode directly into env->fpc. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/* SET PROGRAM MASK: take CC from bits 28-29 of in1 and the program
   mask from bits 24-27, depositing the latter into the PSW mask. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
/* EXTRACT CPU TIME: loads the third operand into r3, stores
   (first operand - CPU timer) into GR0 and the second operand
   address into GR1. */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: deposit bits of in2 into the PSW key. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED, via helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SET SYSTEM MASK: replace the high 8 bits of the PSW mask. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

/* STORE CPU ADDRESS: read the core id from env. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
#endif
/* STORE CLOCK, via helper.  CC is forced to 0. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: build the 16-byte extended TOD value from
   the 64-bit clock plus the TOD programmable register and store it
   as two 8-byte words. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
/* SET CLOCK: load the aligned 8-byte operand and pass it to the
   helper; CC from the helper. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR, via helper. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD, via helper (operand in GR0). */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR, via helper. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
/* STORE CONTROL (64-bit): store control registers r1..r3, via helper. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): store control registers r1..r3, via helper. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
/* STORE CPU ID: read the cpuid from env. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER, via helper. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST, via helper. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER, via helper. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

/* STORE SYSTEM INFORMATION, via helper; CC from the helper. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX, via helper. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
/* Channel-subsystem ops: each forwards to its helper, with the
   subchannel id conventionally in GR1, and sets CC from the helper. */

/* CANCEL SUBCHANNEL. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL (operand: SCHIB address in in2). */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* SET ADDRESS LIMIT, via helper. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR, via helper. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

/* SIGNAL ADAPTER: not implemented. */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS: not provided; no-op. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided.  */
    return DISAS_NEXT;
}

/* START SUBCHANNEL, via helper; CC from the helper. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL, via helper; CC from the helper. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD, via helper; CC from the helper. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* TEST PENDING INTERRUPTION, via helper; CC from the helper. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL, via helper; CC from the helper. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL, via helper; CC from the helper. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PREFIX: read env->psa and mask to the architected bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
/* STORE THEN AND/OR SYSTEM MASK.  Opcode 0xac selects the AND form;
   otherwise the OR form is generated.  The old mask byte is stored
   first so a restart after a fault sees the original system mask. */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        /* AND the immediate into the system-mask byte. */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* OR the immediate into the system-mask byte. */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
/* STORE USING REAL ADDRESS: store in1 through the real-address MMU
   index; notify PER of the real store if PER is active. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, r2), 0);
    tcg_gen_qemu_st_tl(o->in1, o->addr1, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
#endif
/* STORE FACILITY LIST EXTENDED, via helper; CC from the helper. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* Store the low 8 bits of in1 at the address in in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Store the low 16 bits of in1 at the address in in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Store the low 32 bits of in1 at the address in in2. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Store all 64 bits of in1 at the address in in2. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
/* STORE ACCESS MULTIPLE: store access registers r1..r3, via helper. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by
   mask m3 to consecutive addresses.  insn->data is the bit offset of
   the word half being operated on.  Contiguous masks become a single
   8/16/32-bit store; sparse masks fall back to byte-by-byte stores. */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) at
   consecutive addresses.  insn->data is the element size (4 or 8). */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}
/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
   (wrapping mod 16) at consecutive 4-byte addresses. */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* The 32-bit store takes the low half, so shift the high
           half down... by shifting left and letting st32 truncate?
           No: shl by 32 then st32 stores the original high half's
           low bits -- NOTE(review): shl discards the high half; the
           store then writes the original low half shifted in?  The
           code shifts LEFT by 32 and stores 32 bits, i.e. it stores
           bits 32-63 of (r1 << 32), which are the original bits 0-31
           of the HIGH word position -- confirm against helper usage. */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
/* STORE PAIR TO QUADWORD: 16-byte store via helper; uses the atomic
   variant under parallel execution when 128-bit atomics are available,
   otherwise exits to the slow path. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
/* SEARCH STRING, via helper; CC from the helper. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE, via helper; CC from the helper. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* 64-bit subtraction. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Subtract with borrow: out = in1 - in2 - borrow, where the borrow
   is recovered from the current (virtual) CC. */
static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return DISAS_NEXT;
}
/* SUPERVISOR CALL: record the SVC code and instruction length in env,
   then raise the SVC exception. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
/* TEST ADDRESSING MODE: CC encodes the 64/31-bit mode flags from
   the TB flags (known at translation time). */
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (32-bit BFP), via helper; CC from the helper. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (64-bit BFP), via helper; CC from the helper. */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (128-bit BFP), via helper; CC from the helper. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY

/* TEST BLOCK, via helper; CC from the helper. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION, via helper; CC from the helper. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif
/* TEST DECIMAL: validate a packed-decimal field of length l1+1,
   via helper; CC from the helper. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE, via helper; CC from the helper. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED, via helper; low result half via out2. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST, via helper; CC from the helper. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE, via helper; CC from the helper. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT), distinguished
   by the low bits of the opcode.  The test character comes from GR0,
   truncated to the source element size, unless the M3 ETF2 flag
   disables the test (tst = -1). */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* M3 is only honored with the ETF2-enhancement facility. */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* No test character: disable the test with an out-of-range value. */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* TEST AND SET: atomically exchange the byte at in2 with 0xff;
   CC is bit 7 of the old value. */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* UNPACK, via helper; length from l1. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* UNPACK ASCII: first-operand length is l1 + 1, capped at 32 bytes;
   longer lengths raise a specification exception.  CC from helper. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE: like UNPKA, but the length must additionally be
   even (2-byte destination units) and no more than 64 bytes. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* EXCLUSIVE OR (character).  When both operands name the same storage
   (same base and displacement), the result is all zeros, so emit an
   inline zero-fill for short lengths; otherwise defer to the helper. */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        /* Emit the widest stores possible for the remaining length. */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* EXCLUSIVE OR (register forms): out = in1 ^ in2; CC set by cout helper. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/*
 * EXCLUSIVE OR IMMEDIATE (XIHF/XILF family).  insn->data packs the
 * field position: low byte is the shift, next byte the field width.
 */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a private temp: we shift the immediate in place. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
/*
 * EXCLUSIVE OR (XI, immediate-to-storage).  With the
 * interlocked-access facility the XOR is performed atomically in
 * memory; otherwise a plain load/xor/store sequence is emitted.
 * insn->data carries the MemOp for the access size.
 */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* Produce a zero output operand (used e.g. for load-zero insns). */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}
/*
 * Produce a zero output pair.  out2 aliases out, so mark it as a
 * "global" so the generic cleanup does not free the same temp twice.
 */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
5031 #ifndef CONFIG_USER_ONLY
/* CLP (z/PCI): call the helper with r2 and latch the resulting CC. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* PCILG (z/PCI load): helper does the work; CC comes back via cc_op. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* PCISTG (z/PCI store): helper does the work; CC comes back via cc_op. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* STPCIFC (z/PCI): store PCI function controls; b2 is the access reg. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* SIC (set interruption controls): no CC update, pure helper call. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
/* RPCIT (z/PCI refresh translations): helper call plus static CC. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* PCISTB (z/PCI store block): r1/r3 pair plus storage operand addr1. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MPCIFC (z/PCI modify function controls): helper call, static CC. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5121 #endif
5123 #include "translate_vx.inc.c"
5125 /* ====================================================================== */
5126 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5127 the original inputs), update the various cc data structures in order to
5128 be able to compute the new condition code. */
/* Record the 32-bit absolute-value result for deferred CC computation. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
5135 static void cout_abs64(DisasContext *s, DisasOps *o)
5137 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5140 static void cout_adds32(DisasContext *s, DisasOps *o)
5142 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5145 static void cout_adds64(DisasContext *s, DisasOps *o)
5147 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5150 static void cout_addu32(DisasContext *s, DisasOps *o)
5152 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
5155 static void cout_addu64(DisasContext *s, DisasOps *o)
5157 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
5160 static void cout_addc32(DisasContext *s, DisasOps *o)
5162 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
5165 static void cout_addc64(DisasContext *s, DisasOps *o)
5167 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5170 static void cout_cmps32(DisasContext *s, DisasOps *o)
5172 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5175 static void cout_cmps64(DisasContext *s, DisasOps *o)
5177 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5180 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5182 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5185 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5187 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5190 static void cout_f32(DisasContext *s, DisasOps *o)
5192 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5195 static void cout_f64(DisasContext *s, DisasOps *o)
5197 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5200 static void cout_f128(DisasContext *s, DisasOps *o)
5202 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5205 static void cout_nabs32(DisasContext *s, DisasOps *o)
5207 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5210 static void cout_nabs64(DisasContext *s, DisasOps *o)
5212 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5215 static void cout_neg32(DisasContext *s, DisasOps *o)
5217 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5220 static void cout_neg64(DisasContext *s, DisasOps *o)
5222 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5225 static void cout_nz32(DisasContext *s, DisasOps *o)
5227 tcg_gen_ext32u_i64(cc_dst, o->out);
5228 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5231 static void cout_nz64(DisasContext *s, DisasOps *o)
5233 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5236 static void cout_s32(DisasContext *s, DisasOps *o)
5238 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5241 static void cout_s64(DisasContext *s, DisasOps *o)
5243 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5246 static void cout_subs32(DisasContext *s, DisasOps *o)
5248 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5251 static void cout_subs64(DisasContext *s, DisasOps *o)
5253 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5256 static void cout_subu32(DisasContext *s, DisasOps *o)
5258 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5261 static void cout_subu64(DisasContext *s, DisasOps *o)
5263 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5266 static void cout_subb32(DisasContext *s, DisasOps *o)
5268 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5271 static void cout_subb64(DisasContext *s, DisasOps *o)
5273 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5276 static void cout_tm32(DisasContext *s, DisasOps *o)
5278 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5281 static void cout_tm64(DisasContext *s, DisasOps *o)
5283 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5286 /* ====================================================================== */
5287 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5288 with the TCG register to which we will write. Used in combination with
5289 the "wout" generators, in some cases we need a new temporary, and in
5290 some cases we can write to a TCG global. */
5292 static void prep_new(DisasContext *s, DisasOps *o)
5294 o->out = tcg_temp_new_i64();
5296 #define SPEC_prep_new 0
/* Allocate a fresh temp pair for an instruction producing two outputs. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0
5305 static void prep_r1(DisasContext *s, DisasOps *o)
5307 o->out = regs[get_field(s, r1)];
5308 o->g_out = true;
5310 #define SPEC_prep_r1 0
5312 static void prep_r1_P(DisasContext *s, DisasOps *o)
5314 int r1 = get_field(s, r1);
5315 o->out = regs[r1];
5316 o->out2 = regs[r1 + 1];
5317 o->g_out = o->g_out2 = true;
5319 #define SPEC_prep_r1_P SPEC_r1_even
5321 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5322 static void prep_x1(DisasContext *s, DisasOps *o)
5324 o->out = load_freg(get_field(s, r1));
5325 o->out2 = load_freg(get_field(s, r1) + 2);
5327 #define SPEC_prep_x1 SPEC_r1_f128
5329 /* ====================================================================== */
5330 /* The "Write OUTput" generators. These generally perform some non-trivial
5331 copy of data to TCG globals, or to main memory. The trivial cases are
5332 generally handled by having a "prep" generator install the TCG global
5333 as the destination of the operation. */
5335 static void wout_r1(DisasContext *s, DisasOps *o)
5337 store_reg(get_field(s, r1), o->out);
5339 #define SPEC_wout_r1 0
5341 static void wout_r1_8(DisasContext *s, DisasOps *o)
5343 int r1 = get_field(s, r1);
5344 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5346 #define SPEC_wout_r1_8 0
5348 static void wout_r1_16(DisasContext *s, DisasOps *o)
5350 int r1 = get_field(s, r1);
5351 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5353 #define SPEC_wout_r1_16 0
5355 static void wout_r1_32(DisasContext *s, DisasOps *o)
5357 store_reg32_i64(get_field(s, r1), o->out);
5359 #define SPEC_wout_r1_32 0
5361 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5363 store_reg32h_i64(get_field(s, r1), o->out);
5365 #define SPEC_wout_r1_32h 0
5367 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5369 int r1 = get_field(s, r1);
5370 store_reg32_i64(r1, o->out);
5371 store_reg32_i64(r1 + 1, o->out2);
5373 #define SPEC_wout_r1_P32 SPEC_r1_even
/*
 * Write a doubleword result to the even/odd register pair r1/r1+1:
 * low 32 bits to r1+1, high 32 bits to r1.  The in-place shift is
 * safe because o->out is consumed here (low half stored first).
 */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
5384 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5386 int r3 = get_field(s, r3);
5387 store_reg32_i64(r3, o->out);
5388 store_reg32_i64(r3 + 1, o->out2);
5390 #define SPEC_wout_r3_P32 SPEC_r3_even
5392 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5394 int r3 = get_field(s, r3);
5395 store_reg(r3, o->out);
5396 store_reg(r3 + 1, o->out2);
5398 #define SPEC_wout_r3_P64 SPEC_r3_even
5400 static void wout_e1(DisasContext *s, DisasOps *o)
5402 store_freg32_i64(get_field(s, r1), o->out);
5404 #define SPEC_wout_e1 0
5406 static void wout_f1(DisasContext *s, DisasOps *o)
5408 store_freg(get_field(s, r1), o->out);
5410 #define SPEC_wout_f1 0
5412 static void wout_x1(DisasContext *s, DisasOps *o)
5414 int f1 = get_field(s, r1);
5415 store_freg(f1, o->out);
5416 store_freg(f1 + 2, o->out2);
5418 #define SPEC_wout_x1 SPEC_r1_f128
5420 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5422 if (get_field(s, r1) != get_field(s, r2)) {
5423 store_reg32_i64(get_field(s, r1), o->out);
5426 #define SPEC_wout_cond_r1r2_32 0
5428 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5430 if (get_field(s, r1) != get_field(s, r2)) {
5431 store_freg32_i64(get_field(s, r1), o->out);
5434 #define SPEC_wout_cond_e1e2 0
5436 static void wout_m1_8(DisasContext *s, DisasOps *o)
5438 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5440 #define SPEC_wout_m1_8 0
5442 static void wout_m1_16(DisasContext *s, DisasOps *o)
5444 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5446 #define SPEC_wout_m1_16 0
5448 #ifndef CONFIG_USER_ONLY
5449 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5451 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5453 #define SPEC_wout_m1_16a 0
5454 #endif
5456 static void wout_m1_32(DisasContext *s, DisasOps *o)
5458 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5460 #define SPEC_wout_m1_32 0
5462 #ifndef CONFIG_USER_ONLY
5463 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5465 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5467 #define SPEC_wout_m1_32a 0
5468 #endif
5470 static void wout_m1_64(DisasContext *s, DisasOps *o)
5472 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5474 #define SPEC_wout_m1_64 0
5476 #ifndef CONFIG_USER_ONLY
5477 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5479 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5481 #define SPEC_wout_m1_64a 0
5482 #endif
5484 static void wout_m2_32(DisasContext *s, DisasOps *o)
5486 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5488 #define SPEC_wout_m2_32 0
5490 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5492 store_reg(get_field(s, r1), o->in2);
5494 #define SPEC_wout_in2_r1 0
5496 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5498 store_reg32_i64(get_field(s, r1), o->in2);
5500 #define SPEC_wout_in2_r1_32 0
5502 /* ====================================================================== */
5503 /* The "INput 1" generators. These load the first operand to an insn. */
5505 static void in1_r1(DisasContext *s, DisasOps *o)
5507 o->in1 = load_reg(get_field(s, r1));
5509 #define SPEC_in1_r1 0
5511 static void in1_r1_o(DisasContext *s, DisasOps *o)
5513 o->in1 = regs[get_field(s, r1)];
5514 o->g_in1 = true;
5516 #define SPEC_in1_r1_o 0
5518 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5520 o->in1 = tcg_temp_new_i64();
5521 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5523 #define SPEC_in1_r1_32s 0
5525 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5527 o->in1 = tcg_temp_new_i64();
5528 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5530 #define SPEC_in1_r1_32u 0
5532 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5534 o->in1 = tcg_temp_new_i64();
5535 tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5537 #define SPEC_in1_r1_sr32 0
5539 static void in1_r1p1(DisasContext *s, DisasOps *o)
5541 o->in1 = load_reg(get_field(s, r1) + 1);
5543 #define SPEC_in1_r1p1 SPEC_r1_even
5545 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5547 o->in1 = tcg_temp_new_i64();
5548 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5550 #define SPEC_in1_r1p1_32s SPEC_r1_even
5552 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5554 o->in1 = tcg_temp_new_i64();
5555 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5557 #define SPEC_in1_r1p1_32u SPEC_r1_even
5559 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5561 int r1 = get_field(s, r1);
5562 o->in1 = tcg_temp_new_i64();
5563 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5565 #define SPEC_in1_r1_D32 SPEC_r1_even
5567 static void in1_r2(DisasContext *s, DisasOps *o)
5569 o->in1 = load_reg(get_field(s, r2));
5571 #define SPEC_in1_r2 0
5573 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5575 o->in1 = tcg_temp_new_i64();
5576 tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5578 #define SPEC_in1_r2_sr32 0
5580 static void in1_r3(DisasContext *s, DisasOps *o)
5582 o->in1 = load_reg(get_field(s, r3));
5584 #define SPEC_in1_r3 0
5586 static void in1_r3_o(DisasContext *s, DisasOps *o)
5588 o->in1 = regs[get_field(s, r3)];
5589 o->g_in1 = true;
5591 #define SPEC_in1_r3_o 0
5593 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5595 o->in1 = tcg_temp_new_i64();
5596 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5598 #define SPEC_in1_r3_32s 0
5600 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5602 o->in1 = tcg_temp_new_i64();
5603 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5605 #define SPEC_in1_r3_32u 0
5607 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5609 int r3 = get_field(s, r3);
5610 o->in1 = tcg_temp_new_i64();
5611 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5613 #define SPEC_in1_r3_D32 SPEC_r3_even
5615 static void in1_e1(DisasContext *s, DisasOps *o)
5617 o->in1 = load_freg32_i64(get_field(s, r1));
5619 #define SPEC_in1_e1 0
5621 static void in1_f1(DisasContext *s, DisasOps *o)
5623 o->in1 = load_freg(get_field(s, r1));
5625 #define SPEC_in1_f1 0
5627 /* Load the high double word of an extended (128-bit) format FP number */
5628 static void in1_x2h(DisasContext *s, DisasOps *o)
5630 o->in1 = load_freg(get_field(s, r2));
5632 #define SPEC_in1_x2h SPEC_r2_f128
5634 static void in1_f3(DisasContext *s, DisasOps *o)
5636 o->in1 = load_freg(get_field(s, r3));
5638 #define SPEC_in1_f3 0
5640 static void in1_la1(DisasContext *s, DisasOps *o)
5642 o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5644 #define SPEC_in1_la1 0
/* Compute the second-operand effective address (x2+b2+d2) into addr1. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    /* The index field x2 is optional in some formats. */
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0
5653 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5655 in1_la1(s, o);
5656 o->in1 = tcg_temp_new_i64();
5657 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5659 #define SPEC_in1_m1_8u 0
5661 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5663 in1_la1(s, o);
5664 o->in1 = tcg_temp_new_i64();
5665 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5667 #define SPEC_in1_m1_16s 0
5669 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5671 in1_la1(s, o);
5672 o->in1 = tcg_temp_new_i64();
5673 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5675 #define SPEC_in1_m1_16u 0
5677 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5679 in1_la1(s, o);
5680 o->in1 = tcg_temp_new_i64();
5681 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5683 #define SPEC_in1_m1_32s 0
5685 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5687 in1_la1(s, o);
5688 o->in1 = tcg_temp_new_i64();
5689 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5691 #define SPEC_in1_m1_32u 0
5693 static void in1_m1_64(DisasContext *s, DisasOps *o)
5695 in1_la1(s, o);
5696 o->in1 = tcg_temp_new_i64();
5697 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5699 #define SPEC_in1_m1_64 0
5701 /* ====================================================================== */
5702 /* The "INput 2" generators. These load the second operand to an insn. */
5704 static void in2_r1_o(DisasContext *s, DisasOps *o)
5706 o->in2 = regs[get_field(s, r1)];
5707 o->g_in2 = true;
5709 #define SPEC_in2_r1_o 0
5711 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5713 o->in2 = tcg_temp_new_i64();
5714 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5716 #define SPEC_in2_r1_16u 0
5718 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5720 o->in2 = tcg_temp_new_i64();
5721 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5723 #define SPEC_in2_r1_32u 0
5725 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5727 int r1 = get_field(s, r1);
5728 o->in2 = tcg_temp_new_i64();
5729 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5731 #define SPEC_in2_r1_D32 SPEC_r1_even
5733 static void in2_r2(DisasContext *s, DisasOps *o)
5735 o->in2 = load_reg(get_field(s, r2));
5737 #define SPEC_in2_r2 0
5739 static void in2_r2_o(DisasContext *s, DisasOps *o)
5741 o->in2 = regs[get_field(s, r2)];
5742 o->g_in2 = true;
5744 #define SPEC_in2_r2_o 0
5746 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5748 int r2 = get_field(s, r2);
5749 if (r2 != 0) {
5750 o->in2 = load_reg(r2);
5753 #define SPEC_in2_r2_nz 0
5755 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5757 o->in2 = tcg_temp_new_i64();
5758 tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5760 #define SPEC_in2_r2_8s 0
5762 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5764 o->in2 = tcg_temp_new_i64();
5765 tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5767 #define SPEC_in2_r2_8u 0
5769 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5771 o->in2 = tcg_temp_new_i64();
5772 tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5774 #define SPEC_in2_r2_16s 0
5776 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5778 o->in2 = tcg_temp_new_i64();
5779 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5781 #define SPEC_in2_r2_16u 0
5783 static void in2_r3(DisasContext *s, DisasOps *o)
5785 o->in2 = load_reg(get_field(s, r3));
5787 #define SPEC_in2_r3 0
5789 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5791 o->in2 = tcg_temp_new_i64();
5792 tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5794 #define SPEC_in2_r3_sr32 0
5796 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5798 o->in2 = tcg_temp_new_i64();
5799 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5801 #define SPEC_in2_r3_32u 0
5803 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5805 o->in2 = tcg_temp_new_i64();
5806 tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5808 #define SPEC_in2_r2_32s 0
5810 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5812 o->in2 = tcg_temp_new_i64();
5813 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5815 #define SPEC_in2_r2_32u 0
5817 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5819 o->in2 = tcg_temp_new_i64();
5820 tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5822 #define SPEC_in2_r2_sr32 0
5824 static void in2_e2(DisasContext *s, DisasOps *o)
5826 o->in2 = load_freg32_i64(get_field(s, r2));
5828 #define SPEC_in2_e2 0
5830 static void in2_f2(DisasContext *s, DisasOps *o)
5832 o->in2 = load_freg(get_field(s, r2));
5834 #define SPEC_in2_f2 0
5836 /* Load the low double word of an extended (128-bit) format FP number */
5837 static void in2_x2l(DisasContext *s, DisasOps *o)
5839 o->in2 = load_freg(get_field(s, r2) + 2);
5841 #define SPEC_in2_x2l SPEC_r2_f128
5843 static void in2_ra2(DisasContext *s, DisasOps *o)
5845 o->in2 = get_address(s, 0, get_field(s, r2), 0);
5847 #define SPEC_in2_ra2 0
5849 static void in2_a2(DisasContext *s, DisasOps *o)
5851 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5852 o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5854 #define SPEC_in2_a2 0
/* PC-relative second operand: i2 is a signed halfword displacement. */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0
5862 static void in2_sh32(DisasContext *s, DisasOps *o)
5864 help_l2_shift(s, o, 31);
5866 #define SPEC_in2_sh32 0
5868 static void in2_sh64(DisasContext *s, DisasOps *o)
5870 help_l2_shift(s, o, 63);
5872 #define SPEC_in2_sh64 0
5874 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5876 in2_a2(s, o);
5877 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5879 #define SPEC_in2_m2_8u 0
5881 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5883 in2_a2(s, o);
5884 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5886 #define SPEC_in2_m2_16s 0
5888 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5890 in2_a2(s, o);
5891 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5893 #define SPEC_in2_m2_16u 0
5895 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5897 in2_a2(s, o);
5898 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5900 #define SPEC_in2_m2_32s 0
5902 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5904 in2_a2(s, o);
5905 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5907 #define SPEC_in2_m2_32u 0
5909 #ifndef CONFIG_USER_ONLY
5910 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5912 in2_a2(s, o);
5913 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5915 #define SPEC_in2_m2_32ua 0
5916 #endif
5918 static void in2_m2_64(DisasContext *s, DisasOps *o)
5920 in2_a2(s, o);
5921 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5923 #define SPEC_in2_m2_64 0
5925 #ifndef CONFIG_USER_ONLY
5926 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5928 in2_a2(s, o);
5929 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5931 #define SPEC_in2_m2_64a 0
5932 #endif
5934 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5936 in2_ri2(s, o);
5937 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5939 #define SPEC_in2_mri2_16u 0
5941 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5943 in2_ri2(s, o);
5944 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5946 #define SPEC_in2_mri2_32s 0
5948 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5950 in2_ri2(s, o);
5951 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5953 #define SPEC_in2_mri2_32u 0
5955 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5957 in2_ri2(s, o);
5958 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5960 #define SPEC_in2_mri2_64 0
5962 static void in2_i2(DisasContext *s, DisasOps *o)
5964 o->in2 = tcg_const_i64(get_field(s, i2));
5966 #define SPEC_in2_i2 0
5968 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5970 o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
5972 #define SPEC_in2_i2_8u 0
5974 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5976 o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
5978 #define SPEC_in2_i2_16u 0
5980 static void in2_i2_32u(DisasContext *s, DisasOps *o)
5982 o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
5984 #define SPEC_in2_i2_32u 0
/* Zero-extended 16-bit immediate shifted left by insn->data bits. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0
5993 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5995 uint64_t i2 = (uint32_t)get_field(s, i2);
5996 o->in2 = tcg_const_i64(i2 << s->insn->data);
5998 #define SPEC_in2_i2_32u_shl 0
6000 #ifndef CONFIG_USER_ONLY
6001 static void in2_insn(DisasContext *s, DisasOps *o)
6003 o->in2 = tcg_const_i64(s->fields.raw_insn);
6005 #define SPEC_in2_insn 0
6006 #endif
6008 /* ====================================================================== */
6010 /* Find opc within the table of insns. This is formulated as a switch
6011 statement so that (1) we get compile-time notice of cut-paste errors
6012 for duplicated opcodes, and (2) the compiler generates the binary
6013 search tree, rather than us having to post-process the table. */
6015 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6016 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6018 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6019 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6021 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6022 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6024 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6026 enum DisasInsnEnum {
6027 #include "insn-data.def"
6030 #undef E
6031 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6032 .opc = OPC, \
6033 .flags = FL, \
6034 .fmt = FMT_##FT, \
6035 .fac = FAC_##FC, \
6036 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6037 .name = #NM, \
6038 .help_in1 = in1_##I1, \
6039 .help_in2 = in2_##I2, \
6040 .help_prep = prep_##P, \
6041 .help_wout = wout_##W, \
6042 .help_cout = cout_##CC, \
6043 .help_op = op_##OP, \
6044 .data = D \
6047 /* Allow 0 to be used for NULL in the table below. */
6048 #define in1_0 NULL
6049 #define in2_0 NULL
6050 #define prep_0 NULL
6051 #define wout_0 NULL
6052 #define cout_0 NULL
6053 #define op_0 NULL
6055 #define SPEC_in1_0 0
6056 #define SPEC_in2_0 0
6057 #define SPEC_prep_0 0
6058 #define SPEC_wout_0 0
6060 /* Give smaller names to the various facilities. */
6061 #define FAC_Z S390_FEAT_ZARCH
6062 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6063 #define FAC_DFP S390_FEAT_DFP
6064 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6065 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6066 #define FAC_EE S390_FEAT_EXECUTE_EXT
6067 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6068 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6069 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6070 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6071 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6072 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6073 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6074 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6075 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6076 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6077 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6078 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6079 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6080 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6081 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6082 #define FAC_SFLE S390_FEAT_STFLE
6083 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6084 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6085 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6086 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6087 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6088 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6089 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6090 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6091 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6092 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6093 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6094 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6095 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6096 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6097 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6098 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6099 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6100 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6102 static const DisasInsn insn_info[] = {
6103 #include "insn-data.def"
6106 #undef E
6107 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6108 case OPC: return &insn_info[insn_ ## NM];
/*
 * Map an opcode to its DisasInsn descriptor.  The switch is built by
 * re-including insn-data.def with E() expanding to case labels, so the
 * compiler both detects duplicate opcodes and generates the search
 * tree for us.  Returns NULL for an unknown opcode.
 */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
6119 #undef F
6120 #undef E
6121 #undef D
6122 #undef C
6124 /* Extract a field from the insn. The INSN should be left-aligned in
6125 the uint64_t so that we can more easily utilize the big-bit-endian
6126 definitions we extract from the Principals of Operation. */
/*
 * Extract one operand field F from the left-aligned instruction word
 * INSN into the compressed field array O, applying the per-field
 * post-processing selected by f->type.
 */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field is a placeholder; nothing to extract. */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        /* Classic sign-extension via xor/subtract of the sign bit. */
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble: high 8 bits (dh, sign-extended) above low 12 (dl). */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* Vector registers: bit 4 of the register number lives in the
           RXB byte; which RXB bit depends on the field position. */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6183 /* Lookup the insn at the current PC, extracting the operands into O and
6184 returning the info struct for the insn. Returns NULL for invalid insn. */
6186 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6188 uint64_t insn, pc = s->base.pc_next;
6189 int op, op2, ilen;
6190 const DisasInsn *info;
/* ex_value != 0 means we are translating the target of an EXECUTE insn;
   the insn bytes and length were stashed there by the EXECUTE helper. */
6192 if (unlikely(s->ex_value)) {
6193 /* Drop the EX data now, so that it's clear on exception paths. */
6194 TCGv_i64 zero = tcg_const_i64(0);
6195 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6196 tcg_temp_free_i64(zero);
6198 /* Extract the values saved by EXECUTE. */
6199 insn = s->ex_value & 0xffffffffffff0000ull;
6200 ilen = s->ex_value & 0xf;
6201 op = insn >> 56;
6202 } else {
/* Instruction length is determined by the first opcode byte alone. */
6203 insn = ld_code2(env, pc);
6204 op = (insn >> 8) & 0xff;
6205 ilen = get_ilen(op);
6206 switch (ilen) {
6207 case 2:
6208 insn = insn << 48;
6209 break;
6210 case 4:
6211 insn = ld_code4(env, pc) << 32;
6212 break;
6213 case 6:
6214 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6215 break;
6216 default:
6217 g_assert_not_reached();
/* pc_tmp is the address of the next sequential instruction. */
6220 s->pc_tmp = s->base.pc_next + ilen;
6221 s->ilen = ilen;
6223 /* We can't actually determine the insn format until we've looked up
6224 the full insn opcode. Which we can't do without locating the
6225 secondary opcode. Assume by default that OP2 is at bit 40; for
6226 those smaller insns that don't actually have a secondary opcode
6227 this will correctly result in OP2 = 0. */
6228 switch (op) {
6229 case 0x01: /* E */
6230 case 0x80: /* S */
6231 case 0x82: /* S */
6232 case 0x93: /* S */
6233 case 0xb2: /* S, RRF, RRE, IE */
6234 case 0xb3: /* RRE, RRD, RRF */
6235 case 0xb9: /* RRE, RRF */
6236 case 0xe5: /* SSE, SIL */
6237 op2 = (insn << 8) >> 56;
6238 break;
6239 case 0xa5: /* RI */
6240 case 0xa7: /* RI */
6241 case 0xc0: /* RIL */
6242 case 0xc2: /* RIL */
6243 case 0xc4: /* RIL */
6244 case 0xc6: /* RIL */
6245 case 0xc8: /* SSF */
6246 case 0xcc: /* RIL */
6247 op2 = (insn << 12) >> 60;
6248 break;
6249 case 0xc5: /* MII */
6250 case 0xc7: /* SMI */
6251 case 0xd0 ... 0xdf: /* SS */
6252 case 0xe1: /* SS */
6253 case 0xe2: /* SS */
6254 case 0xe8: /* SS */
6255 case 0xe9: /* SS */
6256 case 0xea: /* SS */
6257 case 0xee ... 0xf3: /* SS */
6258 case 0xf8 ... 0xfd: /* SS */
6259 op2 = 0;
6260 break;
6261 default:
6262 op2 = (insn << 40) >> 56;
6263 break;
/* Record the raw insn and opcodes for later diagnostics / field access. */
6266 memset(&s->fields, 0, sizeof(s->fields));
6267 s->fields.raw_insn = insn;
6268 s->fields.op = op;
6269 s->fields.op2 = op2;
6271 /* Lookup the instruction. */
6272 info = lookup_opc(op << 8 | op2);
6273 s->insn = info;
6275 /* If we found it, extract the operands. */
6276 if (info != NULL) {
6277 DisasFormat fmt = info->fmt;
6278 int i;
6280 for (i = 0; i < NUM_C_FIELD; ++i) {
6281 extract_field(&s->fields, &format_info[fmt].op[i], insn);
6284 return info;
/*
 * True if REG is a floating-point register that exists only with the
 * additional-floating-point (AFP) facility.  Without AFP, only the four
 * even registers f0, f2, f4 and f6 are architecturally available, so
 * anything odd or above 6 requires the facility.
 */
static bool is_afp_reg(int reg)
{
    return (reg & 1) || reg > 6;
}
/*
 * True if REG names the low half of a valid 128-bit floating-point
 * register pair.  Valid pair bases are 0,1,4,5,8,9,12,13 -- exactly the
 * register numbers with bit 0x2 clear.
 */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
/* Decode and translate one guest instruction: apply privilege, AFP,
   vector and specification checks, then run the insn's helper pipeline
   (in1/in2 load inputs, prep sets up outputs, op does the work,
   wout/cout write back results and the condition code). */
6298 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6300 const DisasInsn *insn;
6301 DisasJumpType ret = DISAS_NEXT;
6302 DisasOps o = {};
6304 /* Search for the insn in the table. */
6305 insn = extract_insn(env, s);
6307 /* Emit insn_start now that we know the ILEN. */
6308 tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);
6310 /* Not found means unimplemented/illegal opcode. */
6311 if (insn == NULL) {
6312 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6313 s->fields.op, s->fields.op2);
6314 gen_illegal_opcode(s);
6315 return DISAS_NORETURN;
/* With PER tracing active, notify the helper of every insn fetch (sysemu only). */
6318 #ifndef CONFIG_USER_ONLY
6319 if (s->base.tb->flags & FLAG_MASK_PER) {
6320 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6321 gen_helper_per_ifetch(cpu_env, addr);
6322 tcg_temp_free_i64(addr);
6324 #endif
6326 /* process flags */
6327 if (insn->flags) {
6328 /* privileged instruction */
6329 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6330 gen_program_exception(s, PGM_PRIVILEGED);
6331 return DISAS_NORETURN;
6334 /* if AFP is not enabled, instructions and registers are forbidden */
6335 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
/* dxc selects the data-exception code: 1 = AFP register, 2 = BFP, 3 = DFP. */
6336 uint8_t dxc = 0;
6338 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6339 dxc = 1;
6341 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6342 dxc = 1;
6344 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6345 dxc = 1;
6347 if (insn->flags & IF_BFP) {
6348 dxc = 2;
6350 if (insn->flags & IF_DFP) {
6351 dxc = 3;
6353 if (insn->flags & IF_VEC) {
6354 dxc = 0xfe;
6356 if (dxc) {
6357 gen_data_exception(dxc);
6358 return DISAS_NORETURN;
6362 /* if vector instructions not enabled, executing them is forbidden */
6363 if (insn->flags & IF_VEC) {
6364 if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6365 gen_data_exception(0xfe);
6366 return DISAS_NORETURN;
6371 /* Check for insn specification exceptions. */
6372 if (insn->spec) {
6373 if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6374 (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6375 (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6376 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6377 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6378 gen_program_exception(s, PGM_SPECIFICATION);
6379 return DISAS_NORETURN;
6383 /* Implement the instruction. */
6384 if (insn->help_in1) {
6385 insn->help_in1(s, &o);
6387 if (insn->help_in2) {
6388 insn->help_in2(s, &o);
6390 if (insn->help_prep) {
6391 insn->help_prep(s, &o);
6393 if (insn->help_op) {
6394 ret = insn->help_op(s, &o);
/* Skip write-back once the op has raised an exception / ended the TB. */
6396 if (ret != DISAS_NORETURN) {
6397 if (insn->help_wout) {
6398 insn->help_wout(s, &o);
6400 if (insn->help_cout) {
6401 insn->help_cout(s, &o);
6405 /* Free any temporaries created by the helpers. */
6406 if (o.out && !o.g_out) {
6407 tcg_temp_free_i64(o.out);
6409 if (o.out2 && !o.g_out2) {
6410 tcg_temp_free_i64(o.out2);
6412 if (o.in1 && !o.g_in1) {
6413 tcg_temp_free_i64(o.in1);
6415 if (o.in2 && !o.g_in2) {
6416 tcg_temp_free_i64(o.in2);
6418 if (o.addr1) {
6419 tcg_temp_free_i64(o.addr1);
6422 #ifndef CONFIG_USER_ONLY
6423 if (s->base.tb->flags & FLAG_MASK_PER) {
6424 /* An exception might be triggered, save PSW if not already done. */
6425 if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6426 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6429 /* Call the helper to check for a possible PER exception. */
6430 gen_helper_per_check_exception(cpu_env);
6432 #endif
6434 /* Advance to the next instruction. */
6435 s->base.pc_next = s->pc_tmp;
6436 return ret;
6439 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6441 DisasContext *dc = container_of(dcbase, DisasContext, base);
6443 /* 31-bit mode */
6444 if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6445 dc->base.pc_first &= 0x7fffffff;
6446 dc->base.pc_next = dc->base.pc_first;
6449 dc->cc_op = CC_OP_DYNAMIC;
6450 dc->ex_value = dc->base.tb->cs_base;
6451 dc->do_debug = dc->base.singlestep_enabled;
/* Translator hook: no per-TB prologue work is needed on s390x. */
6454 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
/* Translator hook: insn_start is emitted later, in translate_one, once
   the instruction length (ILEN) is known. */
6458 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
/* Translator hook: a gdb breakpoint matched at dc->base.pc_next; end the
   TB with a debug exception.  Returns true to stop translation here. */
6462 static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6463 const CPUBreakpoint *bp)
6465 DisasContext *dc = container_of(dcbase, DisasContext, base);
6468 * Emit an insn_start to accompany the breakpoint exception.
6469 * The ILEN value is a dummy, since this does not result in
6470 * an s390x exception, but an internal qemu exception which
6471 * brings us back to interact with the gdbstub.
6473 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);
6475 dc->base.is_jmp = DISAS_PC_STALE;
6476 dc->do_debug = true;
6477 /* The address covered by the breakpoint must be included in
6478 [tb->pc, tb->pc + tb->size) in order for it to be
6479 properly cleared -- thus we increment the PC here so that
6480 the logic setting tb->size does the right thing. */
6481 dc->base.pc_next += 2;
6482 return true;
6485 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6487 CPUS390XState *env = cs->env_ptr;
6488 DisasContext *dc = container_of(dcbase, DisasContext, base);
6490 dc->base.is_jmp = translate_one(env, dc);
6491 if (dc->base.is_jmp == DISAS_NEXT) {
6492 uint64_t page_start;
6494 page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6495 if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6496 dc->base.is_jmp = DISAS_TOO_MANY;
/* Translator hook: emit the TB epilogue according to how translation ended. */
6501 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6503 DisasContext *dc = container_of(dcbase, DisasContext, base);
6505 switch (dc->base.is_jmp) {
/* The TB exit has already been emitted by the instruction itself. */
6506 case DISAS_GOTO_TB:
6507 case DISAS_NORETURN:
6508 break;
6509 case DISAS_TOO_MANY:
6510 case DISAS_PC_STALE:
6511 case DISAS_PC_STALE_NOCHAIN:
6512 update_psw_addr(dc);
6513 /* FALLTHRU */
6514 case DISAS_PC_UPDATED:
6515 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6516 cc op type is in env */
6517 update_cc_op(dc);
6518 /* FALLTHRU */
6519 case DISAS_PC_CC_UPDATED:
6520 /* Exit the TB, either by raising a debug exception or by return. */
6521 if (dc->do_debug) {
6522 gen_exception(EXCP_DEBUG);
6523 } else if (use_exit_tb(dc) ||
6524 dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
6525 tcg_gen_exit_tb(NULL, 0);
6526 } else {
6527 tcg_gen_lookup_and_goto_ptr();
6529 break;
6530 default:
6531 g_assert_not_reached();
6535 static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6537 DisasContext *dc = container_of(dcbase, DisasContext, base);
6539 if (unlikely(dc->ex_value)) {
6540 /* ??? Unfortunately log_target_disas can't use host memory. */
6541 qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6542 } else {
6543 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6544 log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
/* Callback table wiring the generic translator_loop to the s390x frontend. */
6548 static const TranslatorOps s390x_tr_ops = {
6549     .init_disas_context = s390x_tr_init_disas_context,
6550     .tb_start = s390x_tr_tb_start,
6551     .insn_start = s390x_tr_insn_start,
6552     .breakpoint_check = s390x_tr_breakpoint_check,
6553     .translate_insn = s390x_tr_translate_insn,
6554     .tb_stop = s390x_tr_tb_stop,
6555     .disas_log = s390x_tr_disas_log,
/* Main entry point from the generic accel code: translate one TB for CS. */
6558 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
6560 DisasContext dc;
6562 translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
/* Rebuild CPU state at an exception point from the values recorded by
   tcg_gen_insn_start in translate_one:
   data[0] = psw.addr, data[1] = cc_op, data[2] = ilen. */
6565 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6566 target_ulong *data)
6568 int cc_op = data[1];
6570 env->psw.addr = data[0];
6572 /* Update the CC opcode if it is not already up-to-date. */
6573 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6574 env->cc_op = cc_op;
6577 /* Record ILEN. */
6578 env->int_pgm_ilen = data[2];