/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
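/*
 * Illustration (annotation, not part of the original source): r1, m1,
 * b1, i1 and v1 all share compact slot 0 because no single instruction
 * format uses more than one of them.  Which field is live is recovered
 * from the "O" bitmap, exactly as have_field1() does further below:
 *
 *     (s->fields.presentO >> FLD_O_r1) & 1     r1 was decoded into c[0]
 *     (s->fields.presentO >> FLD_O_m1) & 1     m1 was decoded into c[0]
 */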
struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
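/*
 * Worked example (annotation, not part of the original source): in
 * 31-bit mode (FLAG_MASK_32 set, FLAG_MASK_64 clear) a next-instruction
 * address of 0x1000 is deposited as 0x80001000 into the low 32 bits of
 * "out" -- the addressing-mode bit rides along in the link value while
 * the high half of the register is preserved by the deposit.
 */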
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
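/*
 * Worked example (annotation, not part of the original source): on a
 * little endian host, vec_reg_offset(reg, 1, MO_32) computes
 * offs = 1 * 4 = 4, then offs ^= (8 - 4) yields 0 -- word element 1
 * sits in the low bytes of the first host doubleword, matching the
 * byte-swapped layout pictured above.
 */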
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
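/*
 * Worked example (annotation, not part of the original source): in
 * 24-bit mode, src = 0x00fffffe plus imm = 4 gives 0x01000002, which
 * the final mask reduces to 0x000002 -- address arithmetic wraps
 * within the 24-bit (or, with FLAG_MASK_32, the 31-bit) address space.
 */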
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
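/*
 * Annotation (not part of the original source): in system mode a direct
 * goto_tb is only allowed when the destination shares a guest page with
 * the TB start or the current instruction, i.e. with pages whose
 * translations are known to be valid for this TB; other targets must
 * take the indirect exit.
 */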
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
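/*
 * Worked example (annotation, not part of the original source): a
 * branch mask of 10 (EQ bit 8 | GT bit 2) indexes the "EQ |    | GT"
 * row and yields TCG_COND_GE, so "branch on equal or greater"
 * collapses into one signed >= comparison.
 */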
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
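/*
 * Worked example (annotation, not part of the original source):
 * get_field(s, r1) expands to get_field1(s, FLD_O_r1, FLD_C_r1) -- the
 * "O" index is asserted against the availability bitmap while the "C"
 * index picks the compact slot (c[0] for r1) that the decoder filled.
 */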
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
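/*
 * Worked example (annotation, not part of the original source): BXD(2)
 * expands to three DisasField entries -- base b2 at bit 16, index x2 at
 * bit 12 and a 12-bit displacement d2 at bit 20 -- the classic
 * base + index + displacement layout of RX-style formats.
 */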
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
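    /*
     * Worked example (annotation, not part of the original source):
     * branch-mask bits select CC0..CC3 as 8/4/2/1, so mask 3 = 2|1
     * picks exactly CC=2 and CC=3 -- the two "carry" outcomes of an
     * unsigned add, i.e. the msb of the 2-bit CC.
     */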
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
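/*
 * Worked example (annotation, not part of the original source; the
 * encoding below is hypothetical): were s->insn->data packed as
 * (16 << 8) | 48 -- a 16-bit field at bit 48 -- then
 * mask = ((1ull << 16) - 1) << 48 = 0xffff000000000000; the immediate
 * is shifted into that field, every other bit of in2 is forced to 1,
 * and the AND thus modifies (and sets CC from) only those 16 bits.
 */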
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
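/*
 * Annotation (not part of the original source): the 24-bit-mode link
 * word assembled above packs, from the top of the low word down: ilen/2
 * in bits 31:30, the condition code in bits 29:28, the program mask
 * taken from psw_mask in bits 27:24, and the 24-bit return address in
 * bits 23:0.
 */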
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
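/*
 * Worked example (annotation, not part of the original source): with
 * m3 = 1 and m4 = 5, deposit32(m3, 4, 4, m4) packs both nibbles into
 * one immediate, 0x51 -- m3 in bits 3:0 and m4 in bits 7:4 -- which
 * the consuming helpers unpack again on the other side.
 */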
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2057 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2059 int r2 = get_field(s, r2);
2060 TCGv_i64 len = tcg_temp_new_i64();
2062 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2063 set_cc_static(s);
2064 return_low128(o->out);
2066 tcg_gen_add_i64(regs[r2], regs[r2], len);
2067 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2068 tcg_temp_free_i64(len);
2070 return DISAS_NEXT;
2073 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2075 int l = get_field(s, l1);
2076 TCGv_i32 vl;
2078 switch (l + 1) {
2079 case 1:
2080 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2081 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2082 break;
2083 case 2:
2084 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2085 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2086 break;
2087 case 4:
2088 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2089 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2090 break;
2091 case 8:
2092 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2093 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2094 break;
2095 default:
2096 vl = tcg_const_i32(l);
2097 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2098 tcg_temp_free_i32(vl);
2099 set_cc_static(s);
2100 return DISAS_NEXT;
2102 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2103 return DISAS_NEXT;
2106 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2108 int r1 = get_field(s, r1);
2109 int r2 = get_field(s, r2);
2110 TCGv_i32 t1, t2;
2112 /* r1 and r2 must be even. */
2113 if (r1 & 1 || r2 & 1) {
2114 gen_program_exception(s, PGM_SPECIFICATION);
2115 return DISAS_NORETURN;
2118 t1 = tcg_const_i32(r1);
2119 t2 = tcg_const_i32(r2);
2120 gen_helper_clcl(cc_op, cpu_env, t1, t2);
2121 tcg_temp_free_i32(t1);
2122 tcg_temp_free_i32(t2);
2123 set_cc_static(s);
2124 return DISAS_NEXT;
2127 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2129 int r1 = get_field(s, r1);
2130 int r3 = get_field(s, r3);
2131 TCGv_i32 t1, t3;
2133 /* r1 and r3 must be even. */
2134 if (r1 & 1 || r3 & 1) {
2135 gen_program_exception(s, PGM_SPECIFICATION);
2136 return DISAS_NORETURN;
2139 t1 = tcg_const_i32(r1);
2140 t3 = tcg_const_i32(r3);
2141 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2142 tcg_temp_free_i32(t1);
2143 tcg_temp_free_i32(t3);
2144 set_cc_static(s);
2145 return DISAS_NEXT;
2148 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2150 int r1 = get_field(s, r1);
2151 int r3 = get_field(s, r3);
2152 TCGv_i32 t1, t3;
2154 /* r1 and r3 must be even. */
2155 if (r1 & 1 || r3 & 1) {
2156 gen_program_exception(s, PGM_SPECIFICATION);
2157 return DISAS_NORETURN;
2160 t1 = tcg_const_i32(r1);
2161 t3 = tcg_const_i32(r3);
2162 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2163 tcg_temp_free_i32(t1);
2164 tcg_temp_free_i32(t3);
2165 set_cc_static(s);
2166 return DISAS_NEXT;
2169 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2171 TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
2172 TCGv_i32 t1 = tcg_temp_new_i32();
2173 tcg_gen_extrl_i64_i32(t1, o->in1);
2174 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2175 set_cc_static(s);
2176 tcg_temp_free_i32(t1);
2177 tcg_temp_free_i32(m3);
2178 return DISAS_NEXT;
2181 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2183 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2184 set_cc_static(s);
2185 return_low128(o->in2);
2186 return DISAS_NEXT;
2189 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2191 TCGv_i64 t = tcg_temp_new_i64();
2192 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2193 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2194 tcg_gen_or_i64(o->out, o->out, t);
2195 tcg_temp_free_i64(t);
2196 return DISAS_NEXT;
2199 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2201 int d2 = get_field(s, d2);
2202 int b2 = get_field(s, b2);
2203 TCGv_i64 addr, cc;
2205 /* Note that in1 = R3 (new value) and
2206 in2 = (zero-extended) R1 (expected value). */
2208 addr = get_address(s, 0, b2, d2);
2209 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2210 get_mem_index(s), s->insn->data | MO_ALIGN);
2211 tcg_temp_free_i64(addr);
2213 /* Are the memory and expected values (un)equal? Note that this setcond
2214 produces the output CC value, thus the NE sense of the test. */
2215 cc = tcg_temp_new_i64();
2216 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2217 tcg_gen_extrl_i64_i32(cc_op, cc);
2218 tcg_temp_free_i64(cc);
2219 set_cc_static(s);
2221 return DISAS_NEXT;
2224 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2226 int r1 = get_field(s, r1);
2227 int r3 = get_field(s, r3);
2228 int d2 = get_field(s, d2);
2229 int b2 = get_field(s, b2);
2230 DisasJumpType ret = DISAS_NEXT;
2231 TCGv_i64 addr;
2232 TCGv_i32 t_r1, t_r3;
2234 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2235 addr = get_address(s, 0, b2, d2);
2236 t_r1 = tcg_const_i32(r1);
2237 t_r3 = tcg_const_i32(r3);
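/* Three strategies: a serial helper outside of a parallel context,
   the host's 128-bit cmpxchg when it has one, and otherwise a bail
   out that re-executes this one insn with all other cpus stopped. */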
2238 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2239 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2240 } else if (HAVE_CMPXCHG128) {
2241 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2242 } else {
2243 gen_helper_exit_atomic(cpu_env);
2244 ret = DISAS_NORETURN;
2246 tcg_temp_free_i64(addr);
2247 tcg_temp_free_i32(t_r1);
2248 tcg_temp_free_i32(t_r3);
2250 set_cc_static(s);
2251 return ret;
2254 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2256 int r3 = get_field(s, r3);
2257 TCGv_i32 t_r3 = tcg_const_i32(r3);
2259 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2260 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2261 } else {
2262 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2264 tcg_temp_free_i32(t_r3);
2266 set_cc_static(s);
2267 return DISAS_NEXT;
2270 #ifndef CONFIG_USER_ONLY
2271 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2273 MemOp mop = s->insn->data;
2274 TCGv_i64 addr, old, cc;
2275 TCGLabel *lab = gen_new_label();
2277 /* Note that in1 = R1 (zero-extended expected value),
2278 out = R1 (original reg), out2 = R1+1 (new value). */
2280 addr = tcg_temp_new_i64();
2281 old = tcg_temp_new_i64();
2282 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2283 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2284 get_mem_index(s), mop | MO_ALIGN);
2285 tcg_temp_free_i64(addr);
2287 /* Are the memory and expected values (un)equal? */
2288 cc = tcg_temp_new_i64();
2289 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2290 tcg_gen_extrl_i64_i32(cc_op, cc);
2292 /* Write back the output now, before the following branch, so
2293 that we don't need local temps. */
2294 if ((mop & MO_SIZE) == MO_32) {
2295 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2296 } else {
2297 tcg_gen_mov_i64(o->out, old);
2299 tcg_temp_free_i64(old);
2301 /* If the comparison was equal, and the LSB of R2 was set,
2302 then we need to flush the TLB (for all cpus). */
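/* cc currently holds the "unequal" result; flip its low bit to get
   "equal", then AND with R2 so that the branch below skips the purge
   unless the swap succeeded and the LSB of R2 requested it. */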
2303 tcg_gen_xori_i64(cc, cc, 1);
2304 tcg_gen_and_i64(cc, cc, o->in2);
2305 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2306 tcg_temp_free_i64(cc);
2308 gen_helper_purge(cpu_env);
2309 gen_set_label(lab);
2311 return DISAS_NEXT;
2313 #endif
2315 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2317 TCGv_i64 t1 = tcg_temp_new_i64();
2318 TCGv_i32 t2 = tcg_temp_new_i32();
2319 tcg_gen_extrl_i64_i32(t2, o->in1);
2320 gen_helper_cvd(t1, t2);
2321 tcg_temp_free_i32(t2);
2322 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2323 tcg_temp_free_i64(t1);
2324 return DISAS_NEXT;
2327 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2329 int m3 = get_field(s, m3);
2330 TCGLabel *lab = gen_new_label();
2331 TCGCond c;
2333 c = tcg_invert_cond(ltgt_cond[m3]);
2334 if (s->insn->data) {
2335 c = tcg_unsigned_cond(c);
2337 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2339 /* Trap. */
2340 gen_trap(s);
2342 gen_set_label(lab);
2343 return DISAS_NEXT;
2346 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2348 int m3 = get_field(s, m3);
2349 int r1 = get_field(s, r1);
2350 int r2 = get_field(s, r2);
2351 TCGv_i32 tr1, tr2, chk;
2353 /* R1 and R2 must both be even. */
2354 if ((r1 | r2) & 1) {
2355 gen_program_exception(s, PGM_SPECIFICATION);
2356 return DISAS_NORETURN;
2358 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2359 m3 = 0;
2362 tr1 = tcg_const_i32(r1);
2363 tr2 = tcg_const_i32(r2);
2364 chk = tcg_const_i32(m3);
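/* insn->data names the conversion with two digits, where 1 = UTF-8,
   2 = UTF-16 and 4 = UTF-32; e.g. 21 is CU21, UTF-16 to UTF-8.  The
   m3 well-formedness check is only honoured with the ETF3-enhancement
   facility, hence the reset above. */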
2366 switch (s->insn->data) {
2367 case 12:
2368 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2369 break;
2370 case 14:
2371 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2372 break;
2373 case 21:
2374 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2375 break;
2376 case 24:
2377 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2378 break;
2379 case 41:
2380 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2381 break;
2382 case 42:
2383 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2384 break;
2385 default:
2386 g_assert_not_reached();
2389 tcg_temp_free_i32(tr1);
2390 tcg_temp_free_i32(tr2);
2391 tcg_temp_free_i32(chk);
2392 set_cc_static(s);
2393 return DISAS_NEXT;
2396 #ifndef CONFIG_USER_ONLY
2397 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2399 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2400 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2401 TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2403 gen_helper_diag(cpu_env, r1, r3, func_code);
2405 tcg_temp_free_i32(func_code);
2406 tcg_temp_free_i32(r3);
2407 tcg_temp_free_i32(r1);
2408 return DISAS_NEXT;
2410 #endif
2412 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2414 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2415 return_low128(o->out);
2416 return DISAS_NEXT;
2419 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2421 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2422 return_low128(o->out);
2423 return DISAS_NEXT;
2426 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2428 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2429 return_low128(o->out);
2430 return DISAS_NEXT;
2433 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2435 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2436 return_low128(o->out);
2437 return DISAS_NEXT;
2440 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2442 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2443 return DISAS_NEXT;
2446 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2448 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2449 return DISAS_NEXT;
2452 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2454 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2455 return_low128(o->out2);
2456 return DISAS_NEXT;
2459 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2461 int r2 = get_field(s, r2);
2462 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2463 return DISAS_NEXT;
2466 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2468 /* No cache information provided. */
2469 tcg_gen_movi_i64(o->out, -1);
2470 return DISAS_NEXT;
2473 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2475 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2476 return DISAS_NEXT;
2479 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2481 int r1 = get_field(s, r1);
2482 int r2 = get_field(s, r2);
2483 TCGv_i64 t = tcg_temp_new_i64();
2485 /* Note the "subsequently" in the PoO, which implies a defined result
2486 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2487 tcg_gen_shri_i64(t, psw_mask, 32);
2488 store_reg32_i64(r1, t);
2489 if (r2 != 0) {
2490 store_reg32_i64(r2, psw_mask);
2493 tcg_temp_free_i64(t);
2494 return DISAS_NEXT;
2497 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2499 int r1 = get_field(s, r1);
2500 TCGv_i32 ilen;
2501 TCGv_i64 v1;
2503 /* Nested EXECUTE is not allowed. */
2504 if (unlikely(s->ex_value)) {
2505 gen_program_exception(s, PGM_EXECUTE);
2506 return DISAS_NORETURN;
2509 update_psw_addr(s);
2510 update_cc_op(s);
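/* The helper fetches the target instruction, ORs bits 8-15 of it
   with the low byte of R1 (unless r1 is 0), and records it in
   ex_value so that the next translation round executes it in place. */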
2512 if (r1 == 0) {
2513 v1 = tcg_const_i64(0);
2514 } else {
2515 v1 = regs[r1];
2518 ilen = tcg_const_i32(s->ilen);
2519 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2520 tcg_temp_free_i32(ilen);
2522 if (r1 == 0) {
2523 tcg_temp_free_i64(v1);
2526 return DISAS_PC_CC_UPDATED;
2529 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2531 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2533 if (!m34) {
2534 return DISAS_NORETURN;
2536 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2537 tcg_temp_free_i32(m34);
2538 return DISAS_NEXT;
2541 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2543 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2545 if (!m34) {
2546 return DISAS_NORETURN;
2548 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2549 tcg_temp_free_i32(m34);
2550 return DISAS_NEXT;
2553 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2555 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2557 if (!m34) {
2558 return DISAS_NORETURN;
2560 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2561 return_low128(o->out2);
2562 tcg_temp_free_i32(m34);
2563 return DISAS_NEXT;
2566 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2568 /* We'll use the original input for cc computation, since we get to
2569 compare that against 0, which ought to be better than comparing
2570 the real output against 64. It also lets cc_dst be a convenient
2571 temporary during our computation. */
2572 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2574 /* R1 = IN ? CLZ(IN) : 64. */
2575 tcg_gen_clzi_i64(o->out, o->in2, 64);
2577 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2578 value by 64, which is undefined. But since the shift is 64 iff the
2579 input is zero, we still get the correct result after and'ing. */
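/* Worked example: IN = 0x0000400000000000 yields R1 = 17; the shift
   below leaves only bit 46 set, and the andc clears that bit, so
   R1+1 = 0. */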
2580 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2581 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2582 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2583 return DISAS_NEXT;
2586 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2588 int m3 = get_field(s, m3);
2589 int pos, len, base = s->insn->data;
2590 TCGv_i64 tmp = tcg_temp_new_i64();
2591 uint64_t ccm;
2593 switch (m3) {
2594 case 0xf:
2595 /* Effectively a 32-bit load. */
2596 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2597 len = 32;
2598 goto one_insert;
2600 case 0xc:
2601 case 0x6:
2602 case 0x3:
2603 /* Effectively a 16-bit load. */
2604 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2605 len = 16;
2606 goto one_insert;
2608 case 0x8:
2609 case 0x4:
2610 case 0x2:
2611 case 0x1:
2612 /* Effectively an 8-bit load. */
2613 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2614 len = 8;
2615 goto one_insert;
2617 one_insert:
2618 pos = base + ctz32(m3) * 8;
2619 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2620 ccm = ((1ull << len) - 1) << pos;
2621 break;
2623 default:
2624 /* This is going to be a sequence of loads and inserts. */
2625 pos = base + 32 - 8;
2626 ccm = 0;
2627 while (m3) {
2628 if (m3 & 0x8) {
2629 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2630 tcg_gen_addi_i64(o->in2, o->in2, 1);
2631 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2632 ccm |= 0xff << pos;
2634 m3 = (m3 << 1) & 0xf;
2635 pos -= 8;
2637 break;
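/* In either case CCM covers exactly the bits of R1 that were just
   replaced, and the ICM condition code is computed from those alone. */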
2640 tcg_gen_movi_i64(tmp, ccm);
2641 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2642 tcg_temp_free_i64(tmp);
2643 return DISAS_NEXT;
2646 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2648 int shift = s->insn->data & 0xff;
2649 int size = s->insn->data >> 8;
2650 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2651 return DISAS_NEXT;
2654 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2656 TCGv_i64 t1, t2;
2658 gen_op_calc_cc(s);
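/* Assemble CC (two bits) above the four program-mask bits, then
   insert the combined byte at bits 24-31 of R1, leaving the rest of
   the register unchanged. */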
2659 t1 = tcg_temp_new_i64();
2660 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2661 t2 = tcg_temp_new_i64();
2662 tcg_gen_extu_i32_i64(t2, cc_op);
2663 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2664 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2665 tcg_temp_free_i64(t1);
2666 tcg_temp_free_i64(t2);
2667 return DISAS_NEXT;
2670 #ifndef CONFIG_USER_ONLY
2671 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2673 TCGv_i32 m4;
2675 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2676 m4 = tcg_const_i32(get_field(s, m4));
2677 } else {
2678 m4 = tcg_const_i32(0);
2680 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2681 tcg_temp_free_i32(m4);
2682 return DISAS_NEXT;
2685 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2687 TCGv_i32 m4;
2689 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2690 m4 = tcg_const_i32(get_field(s, m4));
2691 } else {
2692 m4 = tcg_const_i32(0);
2694 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2695 tcg_temp_free_i32(m4);
2696 return DISAS_NEXT;
2699 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2701 gen_helper_iske(o->out, cpu_env, o->in2);
2702 return DISAS_NEXT;
2704 #endif
2706 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2708 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2709 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2710 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2711 TCGv_i32 t_r1, t_r2, t_r3, type;
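/* Validate the register operands demanded by each facility type; the
   cases deliberately fall through so that the later, shared checks
   apply as well.  Any violation is a specification exception. */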
2713 switch (s->insn->data) {
2714 case S390_FEAT_TYPE_KMA:
2715 if (r3 == r1 || r3 == r2) {
2716 gen_program_exception(s, PGM_SPECIFICATION);
2717 return DISAS_NORETURN;
2719 /* FALL THROUGH */
2720 case S390_FEAT_TYPE_KMCTR:
2721 if (r3 & 1 || !r3) {
2722 gen_program_exception(s, PGM_SPECIFICATION);
2723 return DISAS_NORETURN;
2725 /* FALL THROUGH */
2726 case S390_FEAT_TYPE_PPNO:
2727 case S390_FEAT_TYPE_KMF:
2728 case S390_FEAT_TYPE_KMC:
2729 case S390_FEAT_TYPE_KMO:
2730 case S390_FEAT_TYPE_KM:
2731 if (r1 & 1 || !r1) {
2732 gen_program_exception(s, PGM_SPECIFICATION);
2733 return DISAS_NORETURN;
2735 /* FALL THROUGH */
2736 case S390_FEAT_TYPE_KMAC:
2737 case S390_FEAT_TYPE_KIMD:
2738 case S390_FEAT_TYPE_KLMD:
2739 if (r2 & 1 || !r2) {
2740 gen_program_exception(s, PGM_SPECIFICATION);
2741 return DISAS_NORETURN;
2743 /* FALL THROUGH */
2744 case S390_FEAT_TYPE_PCKMO:
2745 case S390_FEAT_TYPE_PCC:
2746 break;
2747 default:
2748 g_assert_not_reached();
2751 t_r1 = tcg_const_i32(r1);
2752 t_r2 = tcg_const_i32(r2);
2753 t_r3 = tcg_const_i32(r3);
2754 type = tcg_const_i32(s->insn->data);
2755 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2756 set_cc_static(s);
2757 tcg_temp_free_i32(t_r1);
2758 tcg_temp_free_i32(t_r2);
2759 tcg_temp_free_i32(t_r3);
2760 tcg_temp_free_i32(type);
2761 return DISAS_NEXT;
2764 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2766 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2767 set_cc_static(s);
2768 return DISAS_NEXT;
2771 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2773 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2774 set_cc_static(s);
2775 return DISAS_NEXT;
2778 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2780 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2781 set_cc_static(s);
2782 return DISAS_NEXT;
2785 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2787 /* The real output is indeed the original value in memory;
2788 the atomic fetch-add leaves it in in2. */
2789 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2790 s->insn->data | MO_ALIGN);
2791 /* However, we need to recompute the addition for setting CC. */
2792 tcg_gen_add_i64(o->out, o->in1, o->in2);
2793 return DISAS_NEXT;
2796 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2798 /* The real output is indeed the original value in memory;
2799 the atomic fetch-and leaves it in in2. */
2800 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2801 s->insn->data | MO_ALIGN);
2802 /* However, we need to recompute the operation for setting CC. */
2803 tcg_gen_and_i64(o->out, o->in1, o->in2);
2804 return DISAS_NEXT;
2807 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2809 /* The real output is indeed the original value in memory;
2810 the atomic fetch-or leaves it in in2. */
2811 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2812 s->insn->data | MO_ALIGN);
2813 /* However, we need to recompute the operation for setting CC. */
2814 tcg_gen_or_i64(o->out, o->in1, o->in2);
2815 return DISAS_NEXT;
2818 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2820 /* The real output is indeed the original value in memory;
2821 the atomic fetch-xor leaves it in in2. */
2822 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2823 s->insn->data | MO_ALIGN);
2824 /* However, we need to recompute the operation for setting CC. */
2825 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2826 return DISAS_NEXT;
2829 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2831 gen_helper_ldeb(o->out, cpu_env, o->in2);
2832 return DISAS_NEXT;
2835 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2837 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2839 if (!m34) {
2840 return DISAS_NORETURN;
2842 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2843 tcg_temp_free_i32(m34);
2844 return DISAS_NEXT;
2847 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2849 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2851 if (!m34) {
2852 return DISAS_NORETURN;
2854 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2855 tcg_temp_free_i32(m34);
2856 return DISAS_NEXT;
2859 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2861 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2863 if (!m34) {
2864 return DISAS_NORETURN;
2866 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2867 tcg_temp_free_i32(m34);
2868 return DISAS_NEXT;
2871 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2873 gen_helper_lxdb(o->out, cpu_env, o->in2);
2874 return_low128(o->out2);
2875 return DISAS_NEXT;
2878 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2880 gen_helper_lxeb(o->out, cpu_env, o->in2);
2881 return_low128(o->out2);
2882 return DISAS_NEXT;
2885 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2887 tcg_gen_shli_i64(o->out, o->in2, 32);
2888 return DISAS_NEXT;
2891 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2893 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2894 return DISAS_NEXT;
2897 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2899 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2900 return DISAS_NEXT;
2903 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2905 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2906 return DISAS_NEXT;
2909 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2911 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2912 return DISAS_NEXT;
2915 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2917 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2918 return DISAS_NEXT;
2921 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2923 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2924 return DISAS_NEXT;
2927 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2929 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2930 return DISAS_NEXT;
2933 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2935 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2936 return DISAS_NEXT;
2939 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2941 TCGLabel *lab = gen_new_label();
2942 store_reg32_i64(get_field(s, r1), o->in2);
2943 /* The value is stored even in case of trap. */
2944 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2945 gen_trap(s);
2946 gen_set_label(lab);
2947 return DISAS_NEXT;
2950 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2952 TCGLabel *lab = gen_new_label();
2953 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2954 /* The value is stored even in case of trap. */
2955 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2956 gen_trap(s);
2957 gen_set_label(lab);
2958 return DISAS_NEXT;
2961 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2963 TCGLabel *lab = gen_new_label();
2964 store_reg32h_i64(get_field(s, r1), o->in2);
2965 /* The value is stored even in case of trap. */
2966 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2967 gen_trap(s);
2968 gen_set_label(lab);
2969 return DISAS_NEXT;
2972 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2974 TCGLabel *lab = gen_new_label();
2975 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2976 /* The value is stored even in case of trap. */
2977 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2978 gen_trap(s);
2979 gen_set_label(lab);
2980 return DISAS_NEXT;
2983 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2985 TCGLabel *lab = gen_new_label();
2986 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2987 /* The value is stored even in case of trap. */
2988 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2989 gen_trap(s);
2990 gen_set_label(lab);
2991 return DISAS_NEXT;
2994 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2996 DisasCompare c;
2998 disas_jcc(s, &c, get_field(s, m3));
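/* out = cond ? in2 : in1, i.e. take the new value when the condition
   holds and keep the old register contents otherwise; movcond avoids
   a branch in both the 64-bit and 32-bit paths. */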
3000 if (c.is_64) {
3001 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
3002 o->in2, o->in1);
3003 free_compare(&c);
3004 } else {
3005 TCGv_i32 t32 = tcg_temp_new_i32();
3006 TCGv_i64 t, z;
3008 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3009 free_compare(&c);
3011 t = tcg_temp_new_i64();
3012 tcg_gen_extu_i32_i64(t, t32);
3013 tcg_temp_free_i32(t32);
3015 z = tcg_const_i64(0);
3016 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3017 tcg_temp_free_i64(t);
3018 tcg_temp_free_i64(z);
3021 return DISAS_NEXT;
3024 #ifndef CONFIG_USER_ONLY
3025 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3027 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3028 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3029 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3030 tcg_temp_free_i32(r1);
3031 tcg_temp_free_i32(r3);
3032 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3033 return DISAS_PC_STALE_NOCHAIN;
3036 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3038 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3039 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3040 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3041 tcg_temp_free_i32(r1);
3042 tcg_temp_free_i32(r3);
3043 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3044 return DISAS_PC_STALE_NOCHAIN;
3047 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3049 gen_helper_lra(o->out, cpu_env, o->in2);
3050 set_cc_static(s);
3051 return DISAS_NEXT;
3054 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3056 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3057 return DISAS_NEXT;
3060 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3062 TCGv_i64 t1, t2;
3064 per_breaking_event(s);
3066 t1 = tcg_temp_new_i64();
3067 t2 = tcg_temp_new_i64();
3068 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3069 MO_TEUL | MO_ALIGN_8);
3070 tcg_gen_addi_i64(o->in2, o->in2, 4);
3071 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3072 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3073 tcg_gen_shli_i64(t1, t1, 32);
3074 gen_helper_load_psw(cpu_env, t1, t2);
3075 tcg_temp_free_i64(t1);
3076 tcg_temp_free_i64(t2);
3077 return DISAS_NORETURN;
3080 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3082 TCGv_i64 t1, t2;
3084 per_breaking_event(s);
3086 t1 = tcg_temp_new_i64();
3087 t2 = tcg_temp_new_i64();
3088 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3089 MO_TEQ | MO_ALIGN_8);
3090 tcg_gen_addi_i64(o->in2, o->in2, 8);
3091 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3092 gen_helper_load_psw(cpu_env, t1, t2);
3093 tcg_temp_free_i64(t1);
3094 tcg_temp_free_i64(t2);
3095 return DISAS_NORETURN;
3097 #endif
3099 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3101 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3102 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3103 gen_helper_lam(cpu_env, r1, o->in2, r3);
3104 tcg_temp_free_i32(r1);
3105 tcg_temp_free_i32(r3);
3106 return DISAS_NEXT;
3109 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3111 int r1 = get_field(s, r1);
3112 int r3 = get_field(s, r3);
3113 TCGv_i64 t1, t2;
3115 /* Only one register to read. */
3116 t1 = tcg_temp_new_i64();
3117 if (unlikely(r1 == r3)) {
3118 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3119 store_reg32_i64(r1, t1);
3120 tcg_temp_free(t1);
3121 return DISAS_NEXT;
3124 /* First load the values of the first and last registers to trigger
3125 possible page faults. */
3126 t2 = tcg_temp_new_i64();
3127 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3128 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3129 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3130 store_reg32_i64(r1, t1);
3131 store_reg32_i64(r3, t2);
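/* From here on every access falls on one of the two pages touched
   above, so the remaining loads cannot fault and the registers cannot
   be left partially updated. */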
3133 /* Only two registers to read. */
3134 if (((r1 + 1) & 15) == r3) {
3135 tcg_temp_free(t2);
3136 tcg_temp_free(t1);
3137 return DISAS_NEXT;
3140 /* Then load the remaining registers. Page fault can't occur. */
3141 r3 = (r3 - 1) & 15;
3142 tcg_gen_movi_i64(t2, 4);
3143 while (r1 != r3) {
3144 r1 = (r1 + 1) & 15;
3145 tcg_gen_add_i64(o->in2, o->in2, t2);
3146 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3147 store_reg32_i64(r1, t1);
3149 tcg_temp_free(t2);
3150 tcg_temp_free(t1);
3152 return DISAS_NEXT;
3155 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3157 int r1 = get_field(s, r1);
3158 int r3 = get_field(s, r3);
3159 TCGv_i64 t1, t2;
3161 /* Only one register to read. */
3162 t1 = tcg_temp_new_i64();
3163 if (unlikely(r1 == r3)) {
3164 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3165 store_reg32h_i64(r1, t1);
3166 tcg_temp_free(t1);
3167 return DISAS_NEXT;
3170 /* First load the values of the first and last registers to trigger
3171 possible page faults. */
3172 t2 = tcg_temp_new_i64();
3173 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3174 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3175 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3176 store_reg32h_i64(r1, t1);
3177 store_reg32h_i64(r3, t2);
3179 /* Only two registers to read. */
3180 if (((r1 + 1) & 15) == r3) {
3181 tcg_temp_free(t2);
3182 tcg_temp_free(t1);
3183 return DISAS_NEXT;
3186 /* Then load the remaining registers. Page fault can't occur. */
3187 r3 = (r3 - 1) & 15;
3188 tcg_gen_movi_i64(t2, 4);
3189 while (r1 != r3) {
3190 r1 = (r1 + 1) & 15;
3191 tcg_gen_add_i64(o->in2, o->in2, t2);
3192 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3193 store_reg32h_i64(r1, t1);
3195 tcg_temp_free(t2);
3196 tcg_temp_free(t1);
3198 return DISAS_NEXT;
3201 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3203 int r1 = get_field(s, r1);
3204 int r3 = get_field(s, r3);
3205 TCGv_i64 t1, t2;
3207 /* Only one register to read. */
3208 if (unlikely(r1 == r3)) {
3209 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3210 return DISAS_NEXT;
3213 /* First load the values of the first and last registers to trigger
3214 possible page faults. */
3215 t1 = tcg_temp_new_i64();
3216 t2 = tcg_temp_new_i64();
3217 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3218 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3219 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3220 tcg_gen_mov_i64(regs[r1], t1);
3221 tcg_temp_free(t2);
3223 /* Only two registers to read. */
3224 if (((r1 + 1) & 15) == r3) {
3225 tcg_temp_free(t1);
3226 return DISAS_NEXT;
3229 /* Then load the remaining registers. Page fault can't occur. */
3230 r3 = (r3 - 1) & 15;
3231 tcg_gen_movi_i64(t1, 8);
3232 while (r1 != r3) {
3233 r1 = (r1 + 1) & 15;
3234 tcg_gen_add_i64(o->in2, o->in2, t1);
3235 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3237 tcg_temp_free(t1);
3239 return DISAS_NEXT;
3242 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3244 TCGv_i64 a1, a2;
3245 MemOp mop = s->insn->data;
3247 /* In a parallel context, stop the world and single step. */
3248 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3249 update_psw_addr(s);
3250 update_cc_op(s);
3251 gen_exception(EXCP_ATOMIC);
3252 return DISAS_NORETURN;
3255 /* In a serial context, perform the two loads ... */
3256 a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3257 a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3258 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3259 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3260 tcg_temp_free_i64(a1);
3261 tcg_temp_free_i64(a2);
3263 /* ... and indicate that we performed them while interlocked. */
3264 gen_op_movi_cc(s, 0);
3265 return DISAS_NEXT;
3268 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3270 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3271 gen_helper_lpq(o->out, cpu_env, o->in2);
3272 } else if (HAVE_ATOMIC128) {
3273 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3274 } else {
3275 gen_helper_exit_atomic(cpu_env);
3276 return DISAS_NORETURN;
3278 return_low128(o->out2);
3279 return DISAS_NEXT;
3282 #ifndef CONFIG_USER_ONLY
3283 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3285 o->addr1 = get_address(s, 0, get_field(s, r2), 0);
3286 tcg_gen_qemu_ld_tl(o->out, o->addr1, MMU_REAL_IDX, s->insn->data);
3287 return DISAS_NEXT;
3289 #endif
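/* LOAD AND ZERO RIGHTMOST BYTE: -256 is ~0xff, clearing byte 0. */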
3291 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3293 tcg_gen_andi_i64(o->out, o->in2, -256);
3294 return DISAS_NEXT;
3297 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3299 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3301 if (get_field(s, m3) > 6) {
3302 gen_program_exception(s, PGM_SPECIFICATION);
3303 return DISAS_NORETURN;
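/* OR-ing with -block_size keeps only addr % block_size while setting
   all higher bits; negating that yields the distance to the next
   block boundary, which is then capped at 16. */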
3306 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3307 tcg_gen_neg_i64(o->addr1, o->addr1);
3308 tcg_gen_movi_i64(o->out, 16);
3309 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3310 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3311 return DISAS_NEXT;
3314 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3316 #if !defined(CONFIG_USER_ONLY)
3317 TCGv_i32 i2;
3318 #endif
3319 const uint16_t monitor_class = get_field(s, i2);
3321 if (monitor_class & 0xff00) {
3322 gen_program_exception(s, PGM_SPECIFICATION);
3323 return DISAS_NORETURN;
3326 #if !defined(CONFIG_USER_ONLY)
3327 i2 = tcg_const_i32(monitor_class);
3328 gen_helper_monitor_call(cpu_env, o->addr1, i2);
3329 tcg_temp_free_i32(i2);
3330 #endif
3331 /* Defaults to a NOP. */
3332 return DISAS_NEXT;
3335 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3337 o->out = o->in2;
3338 o->g_out = o->g_in2;
3339 o->in2 = NULL;
3340 o->g_in2 = false;
3341 return DISAS_NEXT;
3344 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3346 int b2 = get_field(s, b2);
3347 TCGv ar1 = tcg_temp_new_i64();
3349 o->out = o->in2;
3350 o->g_out = o->g_in2;
3351 o->in2 = NULL;
3352 o->g_in2 = false;
3354 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3355 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3356 tcg_gen_movi_i64(ar1, 0);
3357 break;
3358 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3359 tcg_gen_movi_i64(ar1, 1);
3360 break;
3361 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3362 if (b2) {
3363 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3364 } else {
3365 tcg_gen_movi_i64(ar1, 0);
3367 break;
3368 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3369 tcg_gen_movi_i64(ar1, 2);
3370 break;
3373 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3374 tcg_temp_free_i64(ar1);
3376 return DISAS_NEXT;
3379 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3381 o->out = o->in1;
3382 o->out2 = o->in2;
3383 o->g_out = o->g_in1;
3384 o->g_out2 = o->g_in2;
3385 o->in1 = NULL;
3386 o->in2 = NULL;
3387 o->g_in1 = o->g_in2 = false;
3388 return DISAS_NEXT;
3391 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3393 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3394 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3395 tcg_temp_free_i32(l);
3396 return DISAS_NEXT;
3399 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3401 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3402 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3403 tcg_temp_free_i32(l);
3404 return DISAS_NEXT;
3407 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3409 int r1 = get_field(s, r1);
3410 int r2 = get_field(s, r2);
3411 TCGv_i32 t1, t2;
3413 /* r1 and r2 must be even. */
3414 if (r1 & 1 || r2 & 1) {
3415 gen_program_exception(s, PGM_SPECIFICATION);
3416 return DISAS_NORETURN;
3419 t1 = tcg_const_i32(r1);
3420 t2 = tcg_const_i32(r2);
3421 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3422 tcg_temp_free_i32(t1);
3423 tcg_temp_free_i32(t2);
3424 set_cc_static(s);
3425 return DISAS_NEXT;
3428 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3430 int r1 = get_field(s, r1);
3431 int r3 = get_field(s, r3);
3432 TCGv_i32 t1, t3;
3434 /* r1 and r3 must be even. */
3435 if (r1 & 1 || r3 & 1) {
3436 gen_program_exception(s, PGM_SPECIFICATION);
3437 return DISAS_NORETURN;
3440 t1 = tcg_const_i32(r1);
3441 t3 = tcg_const_i32(r3);
3442 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3443 tcg_temp_free_i32(t1);
3444 tcg_temp_free_i32(t3);
3445 set_cc_static(s);
3446 return DISAS_NEXT;
3449 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3451 int r1 = get_field(s, r1);
3452 int r3 = get_field(s, r3);
3453 TCGv_i32 t1, t3;
3455 /* r1 and r3 must be even. */
3456 if (r1 & 1 || r3 & 1) {
3457 gen_program_exception(s, PGM_SPECIFICATION);
3458 return DISAS_NORETURN;
3461 t1 = tcg_const_i32(r1);
3462 t3 = tcg_const_i32(r3);
3463 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3464 tcg_temp_free_i32(t1);
3465 tcg_temp_free_i32(t3);
3466 set_cc_static(s);
3467 return DISAS_NEXT;
3470 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3472 int r3 = get_field(s, r3);
3473 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3474 set_cc_static(s);
3475 return DISAS_NEXT;
3478 #ifndef CONFIG_USER_ONLY
3479 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3481 int r1 = get_field(s, l1);
3482 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3483 set_cc_static(s);
3484 return DISAS_NEXT;
3487 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3489 int r1 = get_field(s, l1);
3490 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3491 set_cc_static(s);
3492 return DISAS_NEXT;
3494 #endif
3496 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3498 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3499 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3500 tcg_temp_free_i32(l);
3501 return DISAS_NEXT;
3504 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3506 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3507 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3508 tcg_temp_free_i32(l);
3509 return DISAS_NEXT;
3512 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3514 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3515 set_cc_static(s);
3516 return DISAS_NEXT;
3519 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3521 TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3522 TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3524 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3525 tcg_temp_free_i32(t1);
3526 tcg_temp_free_i32(t2);
3527 set_cc_static(s);
3528 return DISAS_NEXT;
3531 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3533 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3534 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3535 tcg_temp_free_i32(l);
3536 return DISAS_NEXT;
3539 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3541 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3542 return DISAS_NEXT;
3545 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3547 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3548 return DISAS_NEXT;
3551 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3553 tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3554 return DISAS_NEXT;
3557 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3559 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3560 return DISAS_NEXT;
3563 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3565 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3566 return DISAS_NEXT;
3569 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3571 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3572 return DISAS_NEXT;
3575 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3577 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3578 return_low128(o->out2);
3579 return DISAS_NEXT;
3582 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3584 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3585 return_low128(o->out2);
3586 return DISAS_NEXT;
3589 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3591 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3592 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3593 tcg_temp_free_i64(r3);
3594 return DISAS_NEXT;
3597 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3599 TCGv_i64 r3 = load_freg(get_field(s, r3));
3600 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3601 tcg_temp_free_i64(r3);
3602 return DISAS_NEXT;
3605 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3607 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3608 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3609 tcg_temp_free_i64(r3);
3610 return DISAS_NEXT;
3613 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3615 TCGv_i64 r3 = load_freg(get_field(s, r3));
3616 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3617 tcg_temp_free_i64(r3);
3618 return DISAS_NEXT;
3621 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3623 TCGv_i64 z, n;
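/* out = (in2 >= 0 ? -in2 : in2), i.e. minus the absolute value. */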
3624 z = tcg_const_i64(0);
3625 n = tcg_temp_new_i64();
3626 tcg_gen_neg_i64(n, o->in2);
3627 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3628 tcg_temp_free_i64(n);
3629 tcg_temp_free_i64(z);
3630 return DISAS_NEXT;
3633 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3635 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3636 return DISAS_NEXT;
3639 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3641 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3642 return DISAS_NEXT;
3645 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3647 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3648 tcg_gen_mov_i64(o->out2, o->in2);
3649 return DISAS_NEXT;
3652 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3654 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3655 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3656 tcg_temp_free_i32(l);
3657 set_cc_static(s);
3658 return DISAS_NEXT;
3661 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3663 tcg_gen_neg_i64(o->out, o->in2);
3664 return DISAS_NEXT;
3667 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3669 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3670 return DISAS_NEXT;
3673 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3675 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3676 return DISAS_NEXT;
3679 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3681 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3682 tcg_gen_mov_i64(o->out2, o->in2);
3683 return DISAS_NEXT;
3686 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3688 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3689 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3690 tcg_temp_free_i32(l);
3691 set_cc_static(s);
3692 return DISAS_NEXT;
3695 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3697 tcg_gen_or_i64(o->out, o->in1, o->in2);
3698 return DISAS_NEXT;
3701 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3703 int shift = s->insn->data & 0xff;
3704 int size = s->insn->data >> 8;
3705 uint64_t mask = ((1ull << size) - 1) << shift;
3707 assert(!o->g_in2);
3708 tcg_gen_shli_i64(o->in2, o->in2, shift);
3709 tcg_gen_or_i64(o->out, o->in1, o->in2);
3711 /* Produce the CC from only the bits manipulated. */
3712 tcg_gen_andi_i64(cc_dst, o->out, mask);
3713 set_cc_nz_u64(s, cc_dst);
3714 return DISAS_NEXT;
3717 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3719 o->in1 = tcg_temp_new_i64();
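/* Without the interlocked-access facility the update need not be
   atomic: load, OR and store separately.  With the facility, perform
   an atomic fetch-or and skip the explicit store below. */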
3721 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3722 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3723 } else {
3724 /* Perform the atomic operation in memory. */
3725 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3726 s->insn->data);
3729 /* Recompute also for atomic case: needed for setting CC. */
3730 tcg_gen_or_i64(o->out, o->in1, o->in2);
3732 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3733 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3735 return DISAS_NEXT;
3738 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3740 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3741 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3742 tcg_temp_free_i32(l);
3743 return DISAS_NEXT;
3746 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3748 int l2 = get_field(s, l2) + 1;
3749 TCGv_i32 l;
3751 /* The length must not exceed 32 bytes. */
3752 if (l2 > 32) {
3753 gen_program_exception(s, PGM_SPECIFICATION);
3754 return DISAS_NORETURN;
3756 l = tcg_const_i32(l2);
3757 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3758 tcg_temp_free_i32(l);
3759 return DISAS_NEXT;
3762 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3764 int l2 = get_field(s, l2) + 1;
3765 TCGv_i32 l;
3767 /* The length must be even and must not exceed 64 bytes. */
3768 if ((l2 & 1) || (l2 > 64)) {
3769 gen_program_exception(s, PGM_SPECIFICATION);
3770 return DISAS_NORETURN;
3772 l = tcg_const_i32(l2);
3773 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3774 tcg_temp_free_i32(l);
3775 return DISAS_NEXT;
3778 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3780 gen_helper_popcnt(o->out, o->in2);
3781 return DISAS_NEXT;
3784 #ifndef CONFIG_USER_ONLY
3785 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3787 gen_helper_ptlb(cpu_env);
3788 return DISAS_NEXT;
3790 #endif
3792 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3794 int i3 = get_field(s, i3);
3795 int i4 = get_field(s, i4);
3796 int i5 = get_field(s, i5);
3797 int do_zero = i4 & 0x80;
3798 uint64_t mask, imask, pmask;
3799 int pos, len, rot;
3801 /* Adjust the arguments for the specific insn. */
3802 switch (s->fields.op2) {
3803 case 0x55: /* risbg */
3804 case 0x59: /* risbgn */
3805 i3 &= 63;
3806 i4 &= 63;
3807 pmask = ~0;
3808 break;
3809 case 0x5d: /* risbhg */
3810 i3 &= 31;
3811 i4 &= 31;
3812 pmask = 0xffffffff00000000ull;
3813 break;
3814 case 0x51: /* risblg */
3815 i3 &= 31;
3816 i4 &= 31;
3817 pmask = 0x00000000ffffffffull;
3818 break;
3819 default:
3820 g_assert_not_reached();
3823 /* MASK is the set of bits to be inserted from R2.
3824 Take care of I3/I4 wraparound. */
3825 mask = pmask >> i3;
3826 if (i3 <= i4) {
3827 mask ^= pmask >> i4 >> 1;
3828 } else {
3829 mask |= ~(pmask >> i4 >> 1);
3831 mask &= pmask;
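/* E.g. i3 = 8, i4 = 15 (no wrap) on risbg gives
   mask = (~0ull >> 8) ^ (~0ull >> 16) = 0x00ff000000000000,
   i.e. PoO bit positions 8-15 of R2. */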
3833 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3834 insns, we need to keep the other half of the register. */
3835 imask = ~mask | ~pmask;
3836 if (do_zero) {
3837 imask = ~pmask;
3840 len = i4 - i3 + 1;
3841 pos = 63 - i4;
3842 rot = i5 & 63;
3843 if (s->fields.op2 == 0x5d) {
3844 pos += 32;
3847 /* In some cases we can implement this with extract. */
3848 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3849 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3850 return DISAS_NEXT;
3853 /* In some cases we can implement this with deposit. */
3854 if (len > 0 && (imask == 0 || ~mask == imask)) {
3855 /* Note that we rotate the bits to be inserted to the lsb, not to
3856 the position as described in the PoO. */
3857 rot = (rot - pos) & 63;
3858 } else {
3859 pos = -1;
3862 /* Rotate the input as necessary. */
3863 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3865 /* Insert the selected bits into the output. */
3866 if (pos >= 0) {
3867 if (imask == 0) {
3868 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3869 } else {
3870 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3872 } else if (imask == 0) {
3873 tcg_gen_andi_i64(o->out, o->in2, mask);
3874 } else {
3875 tcg_gen_andi_i64(o->in2, o->in2, mask);
3876 tcg_gen_andi_i64(o->out, o->out, imask);
3877 tcg_gen_or_i64(o->out, o->out, o->in2);
3879 return DISAS_NEXT;
3882 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3884 int i3 = get_field(s, i3);
3885 int i4 = get_field(s, i4);
3886 int i5 = get_field(s, i5);
3887 uint64_t mask;
3889 /* If this is a test-only form, arrange to discard the result. */
3890 if (i3 & 0x80) {
3891 o->out = tcg_temp_new_i64();
3892 o->g_out = false;
3895 i3 &= 63;
3896 i4 &= 63;
3897 i5 &= 63;
3899 /* MASK is the set of bits to be operated on from R2.
3900 Take care of I3/I4 wraparound. */
3901 mask = ~0ull >> i3;
3902 if (i3 <= i4) {
3903 mask ^= ~0ull >> i4 >> 1;
3904 } else {
3905 mask |= ~(~0ull >> i4 >> 1);
3908 /* Rotate the input as necessary. */
3909 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3911 /* Operate. */
3912 switch (s->fields.op2) {
3913 case 0x54: /* AND */
3914 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3915 tcg_gen_and_i64(o->out, o->out, o->in2);
3916 break;
3917 case 0x56: /* OR */
3918 tcg_gen_andi_i64(o->in2, o->in2, mask);
3919 tcg_gen_or_i64(o->out, o->out, o->in2);
3920 break;
3921 case 0x57: /* XOR */
3922 tcg_gen_andi_i64(o->in2, o->in2, mask);
3923 tcg_gen_xor_i64(o->out, o->out, o->in2);
3924 break;
3925 default:
3926 g_assert_not_reached();
3929 /* Set the CC. */
3930 tcg_gen_andi_i64(cc_dst, o->out, mask);
3931 set_cc_nz_u64(s, cc_dst);
3932 return DISAS_NEXT;
3935 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3937 tcg_gen_bswap16_i64(o->out, o->in2);
3938 return DISAS_NEXT;
3941 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3943 tcg_gen_bswap32_i64(o->out, o->in2);
3944 return DISAS_NEXT;
3947 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3949 tcg_gen_bswap64_i64(o->out, o->in2);
3950 return DISAS_NEXT;
3953 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3955 TCGv_i32 t1 = tcg_temp_new_i32();
3956 TCGv_i32 t2 = tcg_temp_new_i32();
3957 TCGv_i32 to = tcg_temp_new_i32();
3958 tcg_gen_extrl_i64_i32(t1, o->in1);
3959 tcg_gen_extrl_i64_i32(t2, o->in2);
3960 tcg_gen_rotl_i32(to, t1, t2);
3961 tcg_gen_extu_i32_i64(o->out, to);
3962 tcg_temp_free_i32(t1);
3963 tcg_temp_free_i32(t2);
3964 tcg_temp_free_i32(to);
3965 return DISAS_NEXT;
3968 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3970 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3971 return DISAS_NEXT;
3974 #ifndef CONFIG_USER_ONLY
3975 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3977 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3978 set_cc_static(s);
3979 return DISAS_NEXT;
3982 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3984 gen_helper_sacf(cpu_env, o->in2);
3985 /* Addressing mode has changed, so end the block. */
3986 return DISAS_PC_STALE;
3988 #endif
3990 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3992 int sam = s->insn->data;
3993 TCGv_i64 tsam;
3994 uint64_t mask;
3996 switch (sam) {
3997 case 0:
3998 mask = 0xffffff;
3999 break;
4000 case 1:
4001 mask = 0x7fffffff;
4002 break;
4003 default:
4004 mask = -1;
4005 break;
4008 /* Bizarre but true, we check the address of the current insn for the
4009 specification exception, not the next to be executed. Thus the PoO
4010 documents that Bad Things Happen two bytes before the end. */
4011 if (s->base.pc_next & ~mask) {
4012 gen_program_exception(s, PGM_SPECIFICATION);
4013 return DISAS_NORETURN;
4015 s->pc_tmp &= mask;
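/* SAM24/31/64 encode the new mode as 0/1/3; the two-bit deposit
   below lands exactly on the extended- and basic-addressing-mode
   bits of the PSW (PoO PSW bits 31-32). */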
4017 tsam = tcg_const_i64(sam);
4018 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
4019 tcg_temp_free_i64(tsam);
4021 /* Always exit the TB, since we (may have) changed execution mode. */
4022 return DISAS_PC_STALE;
4025 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
4027 int r1 = get_field(s, r1);
4028 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
4029 return DISAS_NEXT;
4032 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4034 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4035 return DISAS_NEXT;
4038 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4040 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4041 return DISAS_NEXT;
4044 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4046 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4047 return_low128(o->out2);
4048 return DISAS_NEXT;
4051 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4053 gen_helper_sqeb(o->out, cpu_env, o->in2);
4054 return DISAS_NEXT;
4057 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4059 gen_helper_sqdb(o->out, cpu_env, o->in2);
4060 return DISAS_NEXT;
4063 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4065 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4066 return_low128(o->out2);
4067 return DISAS_NEXT;
4070 #ifndef CONFIG_USER_ONLY
4071 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4073 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4074 set_cc_static(s);
4075 return DISAS_NEXT;
4078 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4080 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4081 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4082 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4083 set_cc_static(s);
4084 tcg_temp_free_i32(r1);
4085 tcg_temp_free_i32(r3);
4086 return DISAS_NEXT;
4088 #endif
4090 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4092 DisasCompare c;
4093 TCGv_i64 a, h;
4094 TCGLabel *lab;
4095 int r1;
4097 disas_jcc(s, &c, get_field(s, m3));
4099 /* We want to store when the condition is fulfilled, so branch
4100 out when it's not. */
4101 c.cond = tcg_invert_cond(c.cond);
4103 lab = gen_new_label();
4104 if (c.is_64) {
4105 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4106 } else {
4107 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4109 free_compare(&c);
4111 r1 = get_field(s, r1);
4112 a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4113 switch (s->insn->data) {
4114 case 1: /* STOCG */
4115 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4116 break;
4117 case 0: /* STOC */
4118 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4119 break;
4120 case 2: /* STOCFH */
4121 h = tcg_temp_new_i64();
4122 tcg_gen_shri_i64(h, regs[r1], 32);
4123 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4124 tcg_temp_free_i64(h);
4125 break;
4126 default:
4127 g_assert_not_reached();
4129 tcg_temp_free_i64(a);
4131 gen_set_label(lab);
4132 return DISAS_NEXT;
4135 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4137 uint64_t sign = 1ull << s->insn->data;
4138 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4139 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4140 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4141 /* The arithmetic left shift is curious in that it does not affect
4142 the sign bit. Copy that over from the source unchanged. */
4143 tcg_gen_andi_i64(o->out, o->out, ~sign);
4144 tcg_gen_andi_i64(o->in1, o->in1, sign);
4145 tcg_gen_or_i64(o->out, o->out, o->in1);
4146 return DISAS_NEXT;
4149 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4151 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4152 return DISAS_NEXT;
4155 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4157 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4158 return DISAS_NEXT;
4161 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4163 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4164 return DISAS_NEXT;
4167 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4169 gen_helper_sfpc(cpu_env, o->in2);
4170 return DISAS_NEXT;
4173 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4175 gen_helper_sfas(cpu_env, o->in2);
4176 return DISAS_NEXT;
4179 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4181 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4182 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4183 gen_helper_srnm(cpu_env, o->addr1);
4184 return DISAS_NEXT;
4187 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4189 /* Bits 0-55 are ignored. */
4190 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4191 gen_helper_srnm(cpu_env, o->addr1);
4192 return DISAS_NEXT;
4195 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4197 TCGv_i64 tmp = tcg_temp_new_i64();
4199 /* Bits other than 61-63 are ignored. */
4200 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4202 /* No need to call a helper, since we don't implement DFP. */
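/* The DFP rounding mode occupies FPC bits 25-27, i.e. bits 4-6
   counting from the lsb, which is where the deposit below places the
   three operand bits. */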
4203 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4204 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4205 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4207 tcg_temp_free_i64(tmp);
4208 return DISAS_NEXT;
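/* SET PROGRAM MASK: bits 34-35 of R1 become the condition code and
   bits 36-39 the program mask (PSW bits 20-23). */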
4211 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4213 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4214 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4215 set_cc_static(s);
4217 tcg_gen_shri_i64(o->in1, o->in1, 24);
4218 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4219 return DISAS_NEXT;
4222 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4224 int b1 = get_field(s, b1);
4225 int d1 = get_field(s, d1);
4226 int b2 = get_field(s, b2);
4227 int d2 = get_field(s, d2);
4228 int r3 = get_field(s, r3);
4229 TCGv_i64 tmp = tcg_temp_new_i64();
4231 /* fetch all operands first */
4232 o->in1 = tcg_temp_new_i64();
4233 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4234 o->in2 = tcg_temp_new_i64();
4235 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4236 o->addr1 = get_address(s, 0, r3, 0);
4238 /* load the third operand into r3 before modifying anything */
4239 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4241 /* subtract CPU timer from first operand and store in GR0 */
4242 gen_helper_stpt(tmp, cpu_env);
4243 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4245 /* store second operand in GR1 */
4246 tcg_gen_mov_i64(regs[1], o->in2);
4248 tcg_temp_free_i64(tmp);
4249 return DISAS_NEXT;
4252 #ifndef CONFIG_USER_ONLY
4253 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4255 tcg_gen_shri_i64(o->in2, o->in2, 4);
4256 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4257 return DISAS_NEXT;
4260 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4262 gen_helper_sske(cpu_env, o->in1, o->in2);
4263 return DISAS_NEXT;
4266 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4268 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4269 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4270 return DISAS_PC_STALE_NOCHAIN;
4273 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4275 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4276 return DISAS_NEXT;
4278 #endif
4280 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4282 gen_helper_stck(o->out, cpu_env);
4283 /* ??? We don't implement clock states. */
4284 gen_op_movi_cc(s, 0);
4285 return DISAS_NEXT;
4288 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4290 TCGv_i64 c1 = tcg_temp_new_i64();
4291 TCGv_i64 c2 = tcg_temp_new_i64();
4292 TCGv_i64 todpr = tcg_temp_new_i64();
4293 gen_helper_stck(c1, cpu_env);
4294 /* 16-bit value stored in a uint32_t (only valid bits set). */
4295 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4296 /* Shift the 64-bit value into its place as a zero-extended
4297 104-bit value. Note that "bit positions 64-103 are always
4298 non-zero so that they compare differently to STCK"; we set
4299 the least significant bit to 1. */
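/* First doubleword: clock bits 0-55, right-aligned with the top
   (epoch) byte zero.  Second doubleword: clock bits 56-63 in the top
   byte, the forced bit 0x10000, and the TOD programmable register in
   the low 16 bits. */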
4300 tcg_gen_shli_i64(c2, c1, 56);
4301 tcg_gen_shri_i64(c1, c1, 8);
4302 tcg_gen_ori_i64(c2, c2, 0x10000);
4303 tcg_gen_or_i64(c2, c2, todpr);
4304 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4305 tcg_gen_addi_i64(o->in2, o->in2, 8);
4306 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4307 tcg_temp_free_i64(c1);
4308 tcg_temp_free_i64(c2);
4309 tcg_temp_free_i64(todpr);
4310 /* ??? We don't implement clock states. */
4311 gen_op_movi_cc(s, 0);
4312 return DISAS_NEXT;
4315 #ifndef CONFIG_USER_ONLY
4316 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4318 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4319 gen_helper_sck(cc_op, cpu_env, o->in1);
4320 set_cc_static(s);
4321 return DISAS_NEXT;
4324 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4326 gen_helper_sckc(cpu_env, o->in2);
4327 return DISAS_NEXT;
4330 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4332 gen_helper_sckpf(cpu_env, regs[0]);
4333 return DISAS_NEXT;
4336 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4338 gen_helper_stckc(o->out, cpu_env);
4339 return DISAS_NEXT;
4342 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4344 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4345 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4346 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4347 tcg_temp_free_i32(r1);
4348 tcg_temp_free_i32(r3);
4349 return DISAS_NEXT;
4352 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4354 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4355 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4356 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4357 tcg_temp_free_i32(r1);
4358 tcg_temp_free_i32(r3);
4359 return DISAS_NEXT;
4362 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4364 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4365 return DISAS_NEXT;
4368 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4370 gen_helper_spt(cpu_env, o->in2);
4371 return DISAS_NEXT;
4374 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4376 gen_helper_stfl(cpu_env);
4377 return DISAS_NEXT;
4380 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4382 gen_helper_stpt(o->out, cpu_env);
4383 return DISAS_NEXT;
4386 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4388 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4389 set_cc_static(s);
4390 return DISAS_NEXT;
4393 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4395 gen_helper_spx(cpu_env, o->in2);
4396 return DISAS_NEXT;
4399 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4401 gen_helper_xsch(cpu_env, regs[1]);
4402 set_cc_static(s);
4403 return DISAS_NEXT;
4406 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4408 gen_helper_csch(cpu_env, regs[1]);
4409 set_cc_static(s);
4410 return DISAS_NEXT;
4413 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4415 gen_helper_hsch(cpu_env, regs[1]);
4416 set_cc_static(s);
4417 return DISAS_NEXT;
4420 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4422 gen_helper_msch(cpu_env, regs[1], o->in2);
4423 set_cc_static(s);
4424 return DISAS_NEXT;
4427 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4429 gen_helper_rchp(cpu_env, regs[1]);
4430 set_cc_static(s);
4431 return DISAS_NEXT;
4434 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4436 gen_helper_rsch(cpu_env, regs[1]);
4437 set_cc_static(s);
4438 return DISAS_NEXT;
4441 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4443 gen_helper_sal(cpu_env, regs[1]);
4444 return DISAS_NEXT;
4447 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4449 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4450 return DISAS_NEXT;
4453 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4455 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4456 gen_op_movi_cc(s, 3);
4457 return DISAS_NEXT;
4460 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4462 /* The instruction is suppressed if not provided. */
4463 return DISAS_NEXT;
4466 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4468 gen_helper_ssch(cpu_env, regs[1], o->in2);
4469 set_cc_static(s);
4470 return DISAS_NEXT;
4473 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4475 gen_helper_stsch(cpu_env, regs[1], o->in2);
4476 set_cc_static(s);
4477 return DISAS_NEXT;
4480 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4482 gen_helper_stcrw(cpu_env, o->in2);
4483 set_cc_static(s);
4484 return DISAS_NEXT;
4487 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4489 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4490 set_cc_static(s);
4491 return DISAS_NEXT;
4494 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4496 gen_helper_tsch(cpu_env, regs[1], o->in2);
4497 set_cc_static(s);
4498 return DISAS_NEXT;
4501 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4503 gen_helper_chsc(cpu_env, o->in2);
4504 set_cc_static(s);
4505 return DISAS_NEXT;
4508 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4510 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4511 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4512 return DISAS_NEXT;
4515 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4517 uint64_t i2 = get_field(s, i2);
4518 TCGv_i64 t;
4520 /* It is important to do what the instruction name says: STORE THEN.
4521 If we let the output hook perform the store, then a fault and
4522 restart would leave the wrong SYSTEM MASK in place. */
4523 t = tcg_temp_new_i64();
4524 tcg_gen_shri_i64(t, psw_mask, 56);
4525 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4526 tcg_temp_free_i64(t);
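/* op 0xac is STNSM (AND the immediate into the mask); op 0xad is
   STOSM (OR it in). */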
4528 if (s->fields.op == 0xac) {
4529 tcg_gen_andi_i64(psw_mask, psw_mask,
4530 (i2 << 56) | 0x00ffffffffffffffull);
4531 } else {
4532 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4535 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4536 return DISAS_PC_STALE_NOCHAIN;
4539 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4541 o->addr1 = get_address(s, 0, get_field(s, r2), 0);
4542 tcg_gen_qemu_st_tl(o->in1, o->addr1, MMU_REAL_IDX, s->insn->data);
4544 if (s->base.tb->flags & FLAG_MASK_PER) {
4545 update_psw_addr(s);
4546 gen_helper_per_store_real(cpu_env);
4548 return DISAS_NEXT;
4550 #endif
4552 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4554 gen_helper_stfle(cc_op, cpu_env, o->in2);
4555 set_cc_static(s);
4556 return DISAS_NEXT;
4559 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4561 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4562 return DISAS_NEXT;
4565 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4567 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4568 return DISAS_NEXT;
4571 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4573 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4574 return DISAS_NEXT;
4577 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4579 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4580 return DISAS_NEXT;
4583 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4585 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4586 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4587 gen_helper_stam(cpu_env, r1, o->in2, r3);
4588 tcg_temp_free_i32(r1);
4589 tcg_temp_free_i32(r3);
4590 return DISAS_NEXT;
4593 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4595 int m3 = get_field(s, m3);
4596 int pos, base = s->insn->data;
4597 TCGv_i64 tmp = tcg_temp_new_i64();
4599 pos = base + ctz32(m3) * 8;
4600 switch (m3) {
4601 case 0xf:
4602 /* Effectively a 32-bit store. */
4603 tcg_gen_shri_i64(tmp, o->in1, pos);
4604 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4605 break;
4607 case 0xc:
4608 case 0x6:
4609 case 0x3:
4610 /* Effectively a 16-bit store. */
4611 tcg_gen_shri_i64(tmp, o->in1, pos);
4612 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4613 break;
4615 case 0x8:
4616 case 0x4:
4617 case 0x2:
4618 case 0x1:
4619 /* Effectively an 8-bit store. */
4620 tcg_gen_shri_i64(tmp, o->in1, pos);
4621 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4622 break;
4624 default:
4625 /* This is going to be a sequence of shifts and stores. */
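/* E.g. (editor's sketch) m3 = 0xa stores byte 0 of the 32-bit field
   at the address and byte 2 at address + 1: the loop walks the mask
   from its MSB and advances the address only for the bits set. */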
4626 pos = base + 32 - 8;
4627 while (m3) {
4628 if (m3 & 0x8) {
4629 tcg_gen_shri_i64(tmp, o->in1, pos);
4630 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4631 tcg_gen_addi_i64(o->in2, o->in2, 1);
4633 m3 = (m3 << 1) & 0xf;
4634 pos -= 8;
4636 break;
4638 tcg_temp_free_i64(tmp);
4639 return DISAS_NEXT;
4642 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4644 int r1 = get_field(s, r1);
4645 int r3 = get_field(s, r3);
4646 int size = s->insn->data;
4647 TCGv_i64 tsize = tcg_const_i64(size);
4649 while (1) {
4650 if (size == 8) {
4651 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4652 } else {
4653 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4655 if (r1 == r3) {
4656 break;
4658 tcg_gen_add_i64(o->in2, o->in2, tsize);
4659 r1 = (r1 + 1) & 15;
4662 tcg_temp_free_i64(tsize);
4663 return DISAS_NEXT;
4666 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4668 int r1 = get_field(s, r1);
4669 int r3 = get_field(s, r3);
4670 TCGv_i64 t = tcg_temp_new_i64();
4671 TCGv_i64 t4 = tcg_const_i64(4);
4672 TCGv_i64 t32 = tcg_const_i64(32);
4674 while (1) {
4675 tcg_gen_shl_i64(t, regs[r1], t32);
4676 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4677 if (r1 == r3) {
4678 break;
4680 tcg_gen_add_i64(o->in2, o->in2, t4);
4681 r1 = (r1 + 1) & 15;
4684 tcg_temp_free_i64(t);
4685 tcg_temp_free_i64(t4);
4686 tcg_temp_free_i64(t32);
4687 return DISAS_NEXT;
4690 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
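/* The 16-byte store must appear atomic to other CPUs under parallel
   (MTTCG) execution; if the host lacks 128-bit atomics, exit_atomic
   retries the insn in an exclusive, serialized context. */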
4692 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4693 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4694 } else if (HAVE_ATOMIC128) {
4695 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4696 } else {
4697 gen_helper_exit_atomic(cpu_env);
4698 return DISAS_NORETURN;
4700 return DISAS_NEXT;
4703 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4705 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4706 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4708 gen_helper_srst(cpu_env, r1, r2);
4710 tcg_temp_free_i32(r1);
4711 tcg_temp_free_i32(r2);
4712 set_cc_static(s);
4713 return DISAS_NEXT;
4716 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4718 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4719 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4721 gen_helper_srstu(cpu_env, r1, r2);
4723 tcg_temp_free_i32(r1);
4724 tcg_temp_free_i32(r2);
4725 set_cc_static(s);
4726 return DISAS_NEXT;
4729 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4731 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4732 return DISAS_NEXT;
4735 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4737 DisasCompare cmp;
4738 TCGv_i64 borrow;
4740 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4742 /* The !borrow flag is the msb of CC. Since we want the inverse of
4743 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4744 disas_jcc(s, &cmp, 8 | 4);
4745 borrow = tcg_temp_new_i64();
4746 if (cmp.is_64) {
4747 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4748 } else {
4749 TCGv_i32 t = tcg_temp_new_i32();
4750 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4751 tcg_gen_extu_i32_i64(borrow, t);
4752 tcg_temp_free_i32(t);
4754 free_compare(&cmp);
4756 tcg_gen_sub_i64(o->out, o->out, borrow);
4757 tcg_temp_free_i64(borrow);
4758 return DISAS_NEXT;
4761 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4763 TCGv_i32 t;
4765 update_psw_addr(s);
4766 update_cc_op(s);
4768 t = tcg_const_i32(get_field(s, i1) & 0xff);
4769 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4770 tcg_temp_free_i32(t);
4772 t = tcg_const_i32(s->ilen);
4773 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4774 tcg_temp_free_i32(t);
4776 gen_exception(EXCP_SVC);
4777 return DISAS_NORETURN;
4780 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
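/* TEST ADDRESSING MODE: CC is 0 for 24-bit, 1 for 31-bit and 3 for
   64-bit mode; both translation flags are set in 64-bit mode. */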
4782 int cc = 0;
4784 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4785 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4786 gen_op_movi_cc(s, cc);
4787 return DISAS_NEXT;
4790 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4792 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4793 set_cc_static(s);
4794 return DISAS_NEXT;
4797 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4799 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4800 set_cc_static(s);
4801 return DISAS_NEXT;
4804 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4806 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4807 set_cc_static(s);
4808 return DISAS_NEXT;
4811 #ifndef CONFIG_USER_ONLY
4813 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4815 gen_helper_testblock(cc_op, cpu_env, o->in2);
4816 set_cc_static(s);
4817 return DISAS_NEXT;
4820 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4822 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4823 set_cc_static(s);
4824 return DISAS_NEXT;
4827 #endif
4829 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4831 TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
4832 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4833 tcg_temp_free_i32(l1);
4834 set_cc_static(s);
4835 return DISAS_NEXT;
4838 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4840 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4841 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4842 tcg_temp_free_i32(l);
4843 set_cc_static(s);
4844 return DISAS_NEXT;
4847 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4849 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4850 return_low128(o->out2);
4851 set_cc_static(s);
4852 return DISAS_NEXT;
4855 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4857 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4858 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4859 tcg_temp_free_i32(l);
4860 set_cc_static(s);
4861 return DISAS_NEXT;
4864 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4866 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4867 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4868 tcg_temp_free_i32(l);
4869 set_cc_static(s);
4870 return DISAS_NEXT;
4873 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4875 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4876 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4877 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4878 TCGv_i32 tst = tcg_temp_new_i32();
4879 int m3 = get_field(s, m3);
4881 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4882 m3 = 0;
4884 if (m3 & 1) {
4885 tcg_gen_movi_i32(tst, -1);
4886 } else {
4887 tcg_gen_extrl_i64_i32(tst, regs[0]);
4888 if (s->insn->opc & 3) {
4889 tcg_gen_ext8u_i32(tst, tst);
4890 } else {
4891 tcg_gen_ext16u_i32(tst, tst);
4894 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4896 tcg_temp_free_i32(r1);
4897 tcg_temp_free_i32(r2);
4898 tcg_temp_free_i32(sizes);
4899 tcg_temp_free_i32(tst);
4900 set_cc_static(s);
4901 return DISAS_NEXT;
4904 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
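/* TEST AND SET: atomically exchange the byte with all ones, and set
   the CC from the leftmost bit of the old value. */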
4906 TCGv_i32 t1 = tcg_const_i32(0xff);
4907 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4908 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4909 tcg_temp_free_i32(t1);
4910 set_cc_static(s);
4911 return DISAS_NEXT;
4914 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4916 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4917 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4918 tcg_temp_free_i32(l);
4919 return DISAS_NEXT;
4922 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4924 int l1 = get_field(s, l1) + 1;
4925 TCGv_i32 l;
4927 /* The length must not exceed 32 bytes. */
4928 if (l1 > 32) {
4929 gen_program_exception(s, PGM_SPECIFICATION);
4930 return DISAS_NORETURN;
4932 l = tcg_const_i32(l1);
4933 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4934 tcg_temp_free_i32(l);
4935 set_cc_static(s);
4936 return DISAS_NEXT;
4939 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4941 int l1 = get_field(s, l1) + 1;
4942 TCGv_i32 l;
4944 /* The length must be even and must not exceed 64 bytes. */
4945 if ((l1 & 1) || (l1 > 64)) {
4946 gen_program_exception(s, PGM_SPECIFICATION);
4947 return DISAS_NORETURN;
4949 l = tcg_const_i32(l1);
4950 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4951 tcg_temp_free_i32(l);
4952 set_cc_static(s);
4953 return DISAS_NEXT;
4957 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4959 int d1 = get_field(s, d1);
4960 int d2 = get_field(s, d2);
4961 int b1 = get_field(s, b1);
4962 int b2 = get_field(s, b2);
4963 int l = get_field(s, l1);
4964 TCGv_i32 t32;
4966 o->addr1 = get_address(s, 0, b1, d1);
4968 /* If the addresses are identical, this is a store/memset of zero. */
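/* (x XOR x == 0, so plain stores suffice; e.g. an 11-byte field
   decomposes into one 8-byte, one 2-byte and one 1-byte store.) */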
4969 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4970 o->in2 = tcg_const_i64(0);
4972 l++;
4973 while (l >= 8) {
4974 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4975 l -= 8;
4976 if (l > 0) {
4977 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4980 if (l >= 4) {
4981 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4982 l -= 4;
4983 if (l > 0) {
4984 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4987 if (l >= 2) {
4988 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4989 l -= 2;
4990 if (l > 0) {
4991 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4994 if (l) {
4995 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4997 gen_op_movi_cc(s, 0);
4998 return DISAS_NEXT;
5001 /* But in general we'll defer to a helper. */
5002 o->in2 = get_address(s, 0, b2, d2);
5003 t32 = tcg_const_i32(l);
5004 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
5005 tcg_temp_free_i32(t32);
5006 set_cc_static(s);
5007 return DISAS_NEXT;
5010 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
5012 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5013 return DISAS_NEXT;
5016 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
5018 int shift = s->insn->data & 0xff;
5019 int size = s->insn->data >> 8;
5020 uint64_t mask = ((1ull << size) - 1) << shift;
5022 assert(!o->g_in2);
5023 tcg_gen_shli_i64(o->in2, o->in2, shift);
5024 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5026 /* Produce the CC from only the bits manipulated. */
5027 tcg_gen_andi_i64(cc_dst, o->out, mask);
5028 set_cc_nz_u64(s, cc_dst);
5029 return DISAS_NEXT;
5032 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5034 o->in1 = tcg_temp_new_i64();
5036 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5037 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5038 } else {
5039 /* Perform the atomic operation in memory. */
5040 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5041 s->insn->data);
5044 /* Recompute also for atomic case: needed for setting CC. */
5045 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5047 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5048 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5050 return DISAS_NEXT;
5053 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5055 o->out = tcg_const_i64(0);
5056 return DISAS_NEXT;
5059 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5061 o->out = tcg_const_i64(0);
5062 o->out2 = o->out;
5063 o->g_out2 = true;
5064 return DISAS_NEXT;
5067 #ifndef CONFIG_USER_ONLY
5068 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5070 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5072 gen_helper_clp(cpu_env, r2);
5073 tcg_temp_free_i32(r2);
5074 set_cc_static(s);
5075 return DISAS_NEXT;
5078 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5080 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5081 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5083 gen_helper_pcilg(cpu_env, r1, r2);
5084 tcg_temp_free_i32(r1);
5085 tcg_temp_free_i32(r2);
5086 set_cc_static(s);
5087 return DISAS_NEXT;
5090 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5092 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5093 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5095 gen_helper_pcistg(cpu_env, r1, r2);
5096 tcg_temp_free_i32(r1);
5097 tcg_temp_free_i32(r2);
5098 set_cc_static(s);
5099 return DISAS_NEXT;
5102 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5104 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5105 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5107 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5108 tcg_temp_free_i32(ar);
5109 tcg_temp_free_i32(r1);
5110 set_cc_static(s);
5111 return DISAS_NEXT;
5114 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5116 gen_helper_sic(cpu_env, o->in1, o->in2);
5117 return DISAS_NEXT;
5120 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5122 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5123 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5125 gen_helper_rpcit(cpu_env, r1, r2);
5126 tcg_temp_free_i32(r1);
5127 tcg_temp_free_i32(r2);
5128 set_cc_static(s);
5129 return DISAS_NEXT;
5132 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5134 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5135 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
5136 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5138 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5139 tcg_temp_free_i32(ar);
5140 tcg_temp_free_i32(r1);
5141 tcg_temp_free_i32(r3);
5142 set_cc_static(s);
5143 return DISAS_NEXT;
5146 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5148 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5149 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5151 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5152 tcg_temp_free_i32(ar);
5153 tcg_temp_free_i32(r1);
5154 set_cc_static(s);
5155 return DISAS_NEXT;
5157 #endif
5159 #include "translate_vx.c.inc"
5161 /* ====================================================================== */
5162 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5163 the original inputs), update the various cc data structures in order to
5164 be able to compute the new condition code. */
5166 static void cout_abs32(DisasContext *s, DisasOps *o)
5168 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5171 static void cout_abs64(DisasContext *s, DisasOps *o)
5173 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5176 static void cout_adds32(DisasContext *s, DisasOps *o)
5178 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5181 static void cout_adds64(DisasContext *s, DisasOps *o)
5183 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5186 static void cout_addu32(DisasContext *s, DisasOps *o)
5188 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
5191 static void cout_addu64(DisasContext *s, DisasOps *o)
5193 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
5196 static void cout_addc32(DisasContext *s, DisasOps *o)
5198 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
5201 static void cout_addc64(DisasContext *s, DisasOps *o)
5203 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5206 static void cout_cmps32(DisasContext *s, DisasOps *o)
5208 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5211 static void cout_cmps64(DisasContext *s, DisasOps *o)
5213 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5216 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5218 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5221 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5223 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5226 static void cout_f32(DisasContext *s, DisasOps *o)
5228 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5231 static void cout_f64(DisasContext *s, DisasOps *o)
5233 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5236 static void cout_f128(DisasContext *s, DisasOps *o)
5238 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5241 static void cout_nabs32(DisasContext *s, DisasOps *o)
5243 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5246 static void cout_nabs64(DisasContext *s, DisasOps *o)
5248 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5251 static void cout_neg32(DisasContext *s, DisasOps *o)
5253 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5256 static void cout_neg64(DisasContext *s, DisasOps *o)
5258 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5261 static void cout_nz32(DisasContext *s, DisasOps *o)
5263 tcg_gen_ext32u_i64(cc_dst, o->out);
5264 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5267 static void cout_nz64(DisasContext *s, DisasOps *o)
5269 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5272 static void cout_s32(DisasContext *s, DisasOps *o)
5274 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5277 static void cout_s64(DisasContext *s, DisasOps *o)
5279 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5282 static void cout_subs32(DisasContext *s, DisasOps *o)
5284 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5287 static void cout_subs64(DisasContext *s, DisasOps *o)
5289 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5292 static void cout_subu32(DisasContext *s, DisasOps *o)
5294 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5297 static void cout_subu64(DisasContext *s, DisasOps *o)
5299 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5302 static void cout_subb32(DisasContext *s, DisasOps *o)
5304 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5307 static void cout_subb64(DisasContext *s, DisasOps *o)
5309 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5312 static void cout_tm32(DisasContext *s, DisasOps *o)
5314 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5317 static void cout_tm64(DisasContext *s, DisasOps *o)
5319 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5322 static void cout_muls32(DisasContext *s, DisasOps *o)
5324 gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5327 static void cout_muls64(DisasContext *s, DisasOps *o)
5329 /* out contains the "high" part, out2 the "low" part of the 128-bit result */
5330 gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5333 /* ====================================================================== */
5334 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5335 with the TCG register to which we will write. Used in combination with
5336 the "wout" generators: in some cases we need a new temporary, and in
5337 some cases we can write directly to a TCG global. */
5339 static void prep_new(DisasContext *s, DisasOps *o)
5341 o->out = tcg_temp_new_i64();
5343 #define SPEC_prep_new 0
5345 static void prep_new_P(DisasContext *s, DisasOps *o)
5347 o->out = tcg_temp_new_i64();
5348 o->out2 = tcg_temp_new_i64();
5350 #define SPEC_prep_new_P 0
5352 static void prep_r1(DisasContext *s, DisasOps *o)
5354 o->out = regs[get_field(s, r1)];
5355 o->g_out = true;
5357 #define SPEC_prep_r1 0
5359 static void prep_r1_P(DisasContext *s, DisasOps *o)
5361 int r1 = get_field(s, r1);
5362 o->out = regs[r1];
5363 o->out2 = regs[r1 + 1];
5364 o->g_out = o->g_out2 = true;
5366 #define SPEC_prep_r1_P SPEC_r1_even
5368 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5369 static void prep_x1(DisasContext *s, DisasOps *o)
5371 o->out = load_freg(get_field(s, r1));
5372 o->out2 = load_freg(get_field(s, r1) + 2);
5374 #define SPEC_prep_x1 SPEC_r1_f128
5376 /* ====================================================================== */
5377 /* The "Write OUTput" generators. These generally perform some non-trivial
5378 copy of data to TCG globals, or to main memory. The trivial cases are
5379 generally handled by having a "prep" generator install the TCG global
5380 as the destination of the operation. */
5382 static void wout_r1(DisasContext *s, DisasOps *o)
5384 store_reg(get_field(s, r1), o->out);
5386 #define SPEC_wout_r1 0
5388 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5390 store_reg(get_field(s, r1), o->out2);
5392 #define SPEC_wout_out2_r1 0
5394 static void wout_r1_8(DisasContext *s, DisasOps *o)
5396 int r1 = get_field(s, r1);
5397 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5399 #define SPEC_wout_r1_8 0
5401 static void wout_r1_16(DisasContext *s, DisasOps *o)
5403 int r1 = get_field(s, r1);
5404 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5406 #define SPEC_wout_r1_16 0
5408 static void wout_r1_32(DisasContext *s, DisasOps *o)
5410 store_reg32_i64(get_field(s, r1), o->out);
5412 #define SPEC_wout_r1_32 0
5414 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5416 store_reg32h_i64(get_field(s, r1), o->out);
5418 #define SPEC_wout_r1_32h 0
5420 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5422 int r1 = get_field(s, r1);
5423 store_reg32_i64(r1, o->out);
5424 store_reg32_i64(r1 + 1, o->out2);
5426 #define SPEC_wout_r1_P32 SPEC_r1_even
5428 static void wout_r1_D32(DisasContext *s, DisasOps *o)
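/* Split the 64-bit result across the even/odd pair: the low half
   goes to r1 + 1, the high half (after the shift) to r1. */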
5430 int r1 = get_field(s, r1);
5431 store_reg32_i64(r1 + 1, o->out);
5432 tcg_gen_shri_i64(o->out, o->out, 32);
5433 store_reg32_i64(r1, o->out);
5435 #define SPEC_wout_r1_D32 SPEC_r1_even
5437 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5439 int r3 = get_field(s, r3);
5440 store_reg32_i64(r3, o->out);
5441 store_reg32_i64(r3 + 1, o->out2);
5443 #define SPEC_wout_r3_P32 SPEC_r3_even
5445 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5447 int r3 = get_field(s, r3);
5448 store_reg(r3, o->out);
5449 store_reg(r3 + 1, o->out2);
5451 #define SPEC_wout_r3_P64 SPEC_r3_even
5453 static void wout_e1(DisasContext *s, DisasOps *o)
5455 store_freg32_i64(get_field(s, r1), o->out);
5457 #define SPEC_wout_e1 0
5459 static void wout_f1(DisasContext *s, DisasOps *o)
5461 store_freg(get_field(s, r1), o->out);
5463 #define SPEC_wout_f1 0
5465 static void wout_x1(DisasContext *s, DisasOps *o)
5467 int f1 = get_field(s, r1);
5468 store_freg(f1, o->out);
5469 store_freg(f1 + 2, o->out2);
5471 #define SPEC_wout_x1 SPEC_r1_f128
5473 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5475 if (get_field(s, r1) != get_field(s, r2)) {
5476 store_reg32_i64(get_field(s, r1), o->out);
5479 #define SPEC_wout_cond_r1r2_32 0
5481 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5483 if (get_field(s, r1) != get_field(s, r2)) {
5484 store_freg32_i64(get_field(s, r1), o->out);
5487 #define SPEC_wout_cond_e1e2 0
5489 static void wout_m1_8(DisasContext *s, DisasOps *o)
5491 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5493 #define SPEC_wout_m1_8 0
5495 static void wout_m1_16(DisasContext *s, DisasOps *o)
5497 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5499 #define SPEC_wout_m1_16 0
5501 #ifndef CONFIG_USER_ONLY
5502 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5504 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5506 #define SPEC_wout_m1_16a 0
5507 #endif
5509 static void wout_m1_32(DisasContext *s, DisasOps *o)
5511 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5513 #define SPEC_wout_m1_32 0
5515 #ifndef CONFIG_USER_ONLY
5516 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5518 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5520 #define SPEC_wout_m1_32a 0
5521 #endif
5523 static void wout_m1_64(DisasContext *s, DisasOps *o)
5525 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5527 #define SPEC_wout_m1_64 0
5529 #ifndef CONFIG_USER_ONLY
5530 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5532 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5534 #define SPEC_wout_m1_64a 0
5535 #endif
5537 static void wout_m2_32(DisasContext *s, DisasOps *o)
5539 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5541 #define SPEC_wout_m2_32 0
5543 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5545 store_reg(get_field(s, r1), o->in2);
5547 #define SPEC_wout_in2_r1 0
5549 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5551 store_reg32_i64(get_field(s, r1), o->in2);
5553 #define SPEC_wout_in2_r1_32 0
5555 /* ====================================================================== */
5556 /* The "INput 1" generators. These load the first operand to an insn. */
5558 static void in1_r1(DisasContext *s, DisasOps *o)
5560 o->in1 = load_reg(get_field(s, r1));
5562 #define SPEC_in1_r1 0
5564 static void in1_r1_o(DisasContext *s, DisasOps *o)
5566 o->in1 = regs[get_field(s, r1)];
5567 o->g_in1 = true;
5569 #define SPEC_in1_r1_o 0
5571 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5573 o->in1 = tcg_temp_new_i64();
5574 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5576 #define SPEC_in1_r1_32s 0
5578 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5580 o->in1 = tcg_temp_new_i64();
5581 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5583 #define SPEC_in1_r1_32u 0
5585 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5587 o->in1 = tcg_temp_new_i64();
5588 tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5590 #define SPEC_in1_r1_sr32 0
5592 static void in1_r1p1(DisasContext *s, DisasOps *o)
5594 o->in1 = load_reg(get_field(s, r1) + 1);
5596 #define SPEC_in1_r1p1 SPEC_r1_even
5598 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5600 o->in1 = regs[get_field(s, r1) + 1];
5601 o->g_in1 = true;
5603 #define SPEC_in1_r1p1_o SPEC_r1_even
5605 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5607 o->in1 = tcg_temp_new_i64();
5608 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5610 #define SPEC_in1_r1p1_32s SPEC_r1_even
5612 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5614 o->in1 = tcg_temp_new_i64();
5615 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5617 #define SPEC_in1_r1p1_32u SPEC_r1_even
5619 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5621 int r1 = get_field(s, r1);
5622 o->in1 = tcg_temp_new_i64();
5623 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5625 #define SPEC_in1_r1_D32 SPEC_r1_even
5627 static void in1_r2(DisasContext *s, DisasOps *o)
5629 o->in1 = load_reg(get_field(s, r2));
5631 #define SPEC_in1_r2 0
5633 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5635 o->in1 = tcg_temp_new_i64();
5636 tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5638 #define SPEC_in1_r2_sr32 0
5640 static void in1_r3(DisasContext *s, DisasOps *o)
5642 o->in1 = load_reg(get_field(s, r3));
5644 #define SPEC_in1_r3 0
5646 static void in1_r3_o(DisasContext *s, DisasOps *o)
5648 o->in1 = regs[get_field(s, r3)];
5649 o->g_in1 = true;
5651 #define SPEC_in1_r3_o 0
5653 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5655 o->in1 = tcg_temp_new_i64();
5656 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5658 #define SPEC_in1_r3_32s 0
5660 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5662 o->in1 = tcg_temp_new_i64();
5663 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5665 #define SPEC_in1_r3_32u 0
5667 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5669 int r3 = get_field(s, r3);
5670 o->in1 = tcg_temp_new_i64();
5671 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5673 #define SPEC_in1_r3_D32 SPEC_r3_even
5675 static void in1_e1(DisasContext *s, DisasOps *o)
5677 o->in1 = load_freg32_i64(get_field(s, r1));
5679 #define SPEC_in1_e1 0
5681 static void in1_f1(DisasContext *s, DisasOps *o)
5683 o->in1 = load_freg(get_field(s, r1));
5685 #define SPEC_in1_f1 0
5687 /* Load the high double word of an extended (128-bit) format FP number */
5688 static void in1_x2h(DisasContext *s, DisasOps *o)
5690 o->in1 = load_freg(get_field(s, r2));
5692 #define SPEC_in1_x2h SPEC_r2_f128
5694 static void in1_f3(DisasContext *s, DisasOps *o)
5696 o->in1 = load_freg(get_field(s, r3));
5698 #define SPEC_in1_f3 0
5700 static void in1_la1(DisasContext *s, DisasOps *o)
5702 o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5704 #define SPEC_in1_la1 0
5706 static void in1_la2(DisasContext *s, DisasOps *o)
5708 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5709 o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5711 #define SPEC_in1_la2 0
5713 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5715 in1_la1(s, o);
5716 o->in1 = tcg_temp_new_i64();
5717 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5719 #define SPEC_in1_m1_8u 0
5721 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5723 in1_la1(s, o);
5724 o->in1 = tcg_temp_new_i64();
5725 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5727 #define SPEC_in1_m1_16s 0
5729 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5731 in1_la1(s, o);
5732 o->in1 = tcg_temp_new_i64();
5733 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5735 #define SPEC_in1_m1_16u 0
5737 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5739 in1_la1(s, o);
5740 o->in1 = tcg_temp_new_i64();
5741 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5743 #define SPEC_in1_m1_32s 0
5745 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5747 in1_la1(s, o);
5748 o->in1 = tcg_temp_new_i64();
5749 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5751 #define SPEC_in1_m1_32u 0
5753 static void in1_m1_64(DisasContext *s, DisasOps *o)
5755 in1_la1(s, o);
5756 o->in1 = tcg_temp_new_i64();
5757 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5759 #define SPEC_in1_m1_64 0
5761 /* ====================================================================== */
5762 /* The "INput 2" generators. These load the second operand to an insn. */
5764 static void in2_r1_o(DisasContext *s, DisasOps *o)
5766 o->in2 = regs[get_field(s, r1)];
5767 o->g_in2 = true;
5769 #define SPEC_in2_r1_o 0
5771 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5773 o->in2 = tcg_temp_new_i64();
5774 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5776 #define SPEC_in2_r1_16u 0
5778 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5780 o->in2 = tcg_temp_new_i64();
5781 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5783 #define SPEC_in2_r1_32u 0
5785 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5787 int r1 = get_field(s, r1);
5788 o->in2 = tcg_temp_new_i64();
5789 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5791 #define SPEC_in2_r1_D32 SPEC_r1_even
5793 static void in2_r2(DisasContext *s, DisasOps *o)
5795 o->in2 = load_reg(get_field(s, r2));
5797 #define SPEC_in2_r2 0
5799 static void in2_r2_o(DisasContext *s, DisasOps *o)
5801 o->in2 = regs[get_field(s, r2)];
5802 o->g_in2 = true;
5804 #define SPEC_in2_r2_o 0
5806 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5808 int r2 = get_field(s, r2);
5809 if (r2 != 0) {
5810 o->in2 = load_reg(r2);
5813 #define SPEC_in2_r2_nz 0
5815 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5817 o->in2 = tcg_temp_new_i64();
5818 tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5820 #define SPEC_in2_r2_8s 0
5822 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5824 o->in2 = tcg_temp_new_i64();
5825 tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5827 #define SPEC_in2_r2_8u 0
5829 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5831 o->in2 = tcg_temp_new_i64();
5832 tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5834 #define SPEC_in2_r2_16s 0
5836 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5838 o->in2 = tcg_temp_new_i64();
5839 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5841 #define SPEC_in2_r2_16u 0
5843 static void in2_r3(DisasContext *s, DisasOps *o)
5845 o->in2 = load_reg(get_field(s, r3));
5847 #define SPEC_in2_r3 0
5849 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5851 o->in2 = tcg_temp_new_i64();
5852 tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5854 #define SPEC_in2_r3_sr32 0
5856 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5858 o->in2 = tcg_temp_new_i64();
5859 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5861 #define SPEC_in2_r3_32u 0
5863 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5865 o->in2 = tcg_temp_new_i64();
5866 tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5868 #define SPEC_in2_r2_32s 0
5870 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5872 o->in2 = tcg_temp_new_i64();
5873 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5875 #define SPEC_in2_r2_32u 0
5877 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5879 o->in2 = tcg_temp_new_i64();
5880 tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5882 #define SPEC_in2_r2_sr32 0
5884 static void in2_e2(DisasContext *s, DisasOps *o)
5886 o->in2 = load_freg32_i64(get_field(s, r2));
5888 #define SPEC_in2_e2 0
5890 static void in2_f2(DisasContext *s, DisasOps *o)
5892 o->in2 = load_freg(get_field(s, r2));
5894 #define SPEC_in2_f2 0
5896 /* Load the low double word of an extended (128-bit) format FP number */
5897 static void in2_x2l(DisasContext *s, DisasOps *o)
5899 o->in2 = load_freg(get_field(s, r2) + 2);
5901 #define SPEC_in2_x2l SPEC_r2_f128
5903 static void in2_ra2(DisasContext *s, DisasOps *o)
5905 o->in2 = get_address(s, 0, get_field(s, r2), 0);
5907 #define SPEC_in2_ra2 0
5909 static void in2_a2(DisasContext *s, DisasOps *o)
5911 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5912 o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5914 #define SPEC_in2_a2 0
5916 static void in2_ri2(DisasContext *s, DisasOps *o)
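/* I2 is a signed halfword offset relative to the address of the
   current insn, hence the multiplication by 2. */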
5918 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
5920 #define SPEC_in2_ri2 0
5922 static void in2_sh32(DisasContext *s, DisasOps *o)
5924 help_l2_shift(s, o, 31);
5926 #define SPEC_in2_sh32 0
5928 static void in2_sh64(DisasContext *s, DisasOps *o)
5930 help_l2_shift(s, o, 63);
5932 #define SPEC_in2_sh64 0
5934 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5936 in2_a2(s, o);
5937 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5939 #define SPEC_in2_m2_8u 0
5941 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5943 in2_a2(s, o);
5944 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5946 #define SPEC_in2_m2_16s 0
5948 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5950 in2_a2(s, o);
5951 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5953 #define SPEC_in2_m2_16u 0
5955 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5957 in2_a2(s, o);
5958 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5960 #define SPEC_in2_m2_32s 0
5962 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5964 in2_a2(s, o);
5965 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5967 #define SPEC_in2_m2_32u 0
5969 #ifndef CONFIG_USER_ONLY
5970 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5972 in2_a2(s, o);
5973 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5975 #define SPEC_in2_m2_32ua 0
5976 #endif
5978 static void in2_m2_64(DisasContext *s, DisasOps *o)
5980 in2_a2(s, o);
5981 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5983 #define SPEC_in2_m2_64 0
5985 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5987 in2_a2(s, o);
5988 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
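/* The add of 0 funnels the loaded value through the address-wrapping
   helper, masking it to the current addressing mode. */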
5989 gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5991 #define SPEC_in2_m2_64w 0
5993 #ifndef CONFIG_USER_ONLY
5994 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5996 in2_a2(s, o);
5997 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5999 #define SPEC_in2_m2_64a 0
6000 #endif
6002 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
6004 in2_ri2(s, o);
6005 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6007 #define SPEC_in2_mri2_16u 0
6009 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
6011 in2_ri2(s, o);
6012 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6014 #define SPEC_in2_mri2_32s 0
6016 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
6018 in2_ri2(s, o);
6019 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6021 #define SPEC_in2_mri2_32u 0
6023 static void in2_mri2_64(DisasContext *s, DisasOps *o)
6025 in2_ri2(s, o);
6026 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6028 #define SPEC_in2_mri2_64 0
6030 static void in2_i2(DisasContext *s, DisasOps *o)
6032 o->in2 = tcg_const_i64(get_field(s, i2));
6034 #define SPEC_in2_i2 0
6036 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6038 o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6040 #define SPEC_in2_i2_8u 0
6042 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6044 o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6046 #define SPEC_in2_i2_16u 0
6048 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6050 o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6052 #define SPEC_in2_i2_32u 0
6054 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6056 uint64_t i2 = (uint16_t)get_field(s, i2);
6057 o->in2 = tcg_const_i64(i2 << s->insn->data);
6059 #define SPEC_in2_i2_16u_shl 0
6061 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6063 uint64_t i2 = (uint32_t)get_field(s, i2);
6064 o->in2 = tcg_const_i64(i2 << s->insn->data);
6066 #define SPEC_in2_i2_32u_shl 0
6068 #ifndef CONFIG_USER_ONLY
6069 static void in2_insn(DisasContext *s, DisasOps *o)
6071 o->in2 = tcg_const_i64(s->fields.raw_insn);
6073 #define SPEC_in2_insn 0
6074 #endif
6076 /* ====================================================================== */
6078 /* Find opc within the table of insns. This is formulated as a switch
6079 statement so that (1) we get compile-time notice of cut-paste errors
6080 for duplicated opcodes, and (2) the compiler generates the binary
6081 search tree, rather than us having to post-process the table. */
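/* For example, an insn-data.def entry of the form
       C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
   expands three times under the successive definitions of E below:
   into an "insn_AR" enumerator, into an insn_info[] initializer, and
   into "case 0x1a00: return &insn_info[insn_AR];" in lookup_opc. */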
6083 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6084 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6086 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6087 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6089 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6090 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6092 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6094 enum DisasInsnEnum {
6095 #include "insn-data.def"
6098 #undef E
6099 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6100 .opc = OPC, \
6101 .flags = FL, \
6102 .fmt = FMT_##FT, \
6103 .fac = FAC_##FC, \
6104 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6105 .name = #NM, \
6106 .help_in1 = in1_##I1, \
6107 .help_in2 = in2_##I2, \
6108 .help_prep = prep_##P, \
6109 .help_wout = wout_##W, \
6110 .help_cout = cout_##CC, \
6111 .help_op = op_##OP, \
6112 .data = D \
6115 /* Allow 0 to be used for NULL in the table below. */
6116 #define in1_0 NULL
6117 #define in2_0 NULL
6118 #define prep_0 NULL
6119 #define wout_0 NULL
6120 #define cout_0 NULL
6121 #define op_0 NULL
6123 #define SPEC_in1_0 0
6124 #define SPEC_in2_0 0
6125 #define SPEC_prep_0 0
6126 #define SPEC_wout_0 0
6128 /* Give smaller names to the various facilities. */
6129 #define FAC_Z S390_FEAT_ZARCH
6130 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6131 #define FAC_DFP S390_FEAT_DFP
6132 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6133 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6134 #define FAC_EE S390_FEAT_EXECUTE_EXT
6135 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6136 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6137 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6138 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6139 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6140 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6141 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6142 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6143 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6144 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6145 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6146 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6147 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6148 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6149 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6150 #define FAC_SFLE S390_FEAT_STFLE
6151 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6152 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6153 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6154 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6155 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6156 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6157 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6158 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6159 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6160 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6161 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6162 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6163 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6164 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6165 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6166 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6167 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6168 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6169 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6170 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6172 static const DisasInsn insn_info[] = {
6173 #include "insn-data.def"
6176 #undef E
6177 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6178 case OPC: return &insn_info[insn_ ## NM];
6180 static const DisasInsn *lookup_opc(uint16_t opc)
6182 switch (opc) {
6183 #include "insn-data.def"
6184 default:
6185 return NULL;
6189 #undef F
6190 #undef E
6191 #undef D
6192 #undef C
6194 /* Extract a field from the insn. The INSN should be left-aligned in
6195 the uint64_t so that we can more easily utilize the big-bit-endian
6196 definitions we extract from the Principles of Operation. */
6198 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6200 uint32_t r, m;
6202 if (f->size == 0) {
6203 return;
6206 /* Zero extract the field from the insn. */
6207 r = (insn << f->beg) >> (64 - f->size);
6209 /* Sign-extend, or un-swap the field as necessary. */
6210 switch (f->type) {
6211 case 0: /* unsigned */
6212 break;
6213 case 1: /* signed */
6214 assert(f->size <= 32);
6215 m = 1u << (f->size - 1);
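/* (r ^ m) - m is the usual branch-free sign extension of an
   f->size-bit value. */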
6216 r = (r ^ m) - m;
6217 break;
6218 case 2: /* dl+dh split, signed 20 bit. */
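/* The raw field is DL(12 bits):DH(8 bits); the line below
   reassembles it as a sign-extended DH:DL value, e.g.
   DL = 0xfff, DH = 0xff yields -1. */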
6219 r = ((int8_t)r << 12) | (r >> 8);
6220 break;
6221 case 3: /* MSB stored in RXB */
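/* The RXB field (bits 36-39 of the left-aligned insn) supplies
   bit 4, the MSB, of each 5-bit vector register number; which RXB
   bit applies depends on where the 4-bit field begins. */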
6222 g_assert(f->size == 4);
6223 switch (f->beg) {
6224 case 8:
6225 r |= extract64(insn, 63 - 36, 1) << 4;
6226 break;
6227 case 12:
6228 r |= extract64(insn, 63 - 37, 1) << 4;
6229 break;
6230 case 16:
6231 r |= extract64(insn, 63 - 38, 1) << 4;
6232 break;
6233 case 32:
6234 r |= extract64(insn, 63 - 39, 1) << 4;
6235 break;
6236 default:
6237 g_assert_not_reached();
6239 break;
6240 default:
6241 abort();
6244 /* Check that the "compressed" encoding we selected above is valid,
6245 i.e. that we haven't made two different original fields overlap. */
6246 assert(((o->presentC >> f->indexC) & 1) == 0);
6247 o->presentC |= 1 << f->indexC;
6248 o->presentO |= 1 << f->indexO;
6250 o->c[f->indexC] = r;
6253 /* Look up the insn at the current PC, extracting the operands into O and
6254 returning the info struct for the insn. Returns NULL for invalid insn. */
6256 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6258 uint64_t insn, pc = s->base.pc_next;
6259 int op, op2, ilen;
6260 const DisasInsn *info;
6262 if (unlikely(s->ex_value)) {
6263 /* Drop the EX data now, so that it's clear on exception paths. */
6264 TCGv_i64 zero = tcg_const_i64(0);
6265 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6266 tcg_temp_free_i64(zero);
6268 /* Extract the values saved by EXECUTE. */
6269 insn = s->ex_value & 0xffffffffffff0000ull;
6270 ilen = s->ex_value & 0xf;
6271 op = insn >> 56;
6272 } else {
6273 insn = ld_code2(env, pc);
6274 op = (insn >> 8) & 0xff;
6275 ilen = get_ilen(op);
6276 switch (ilen) {
6277 case 2:
6278 insn = insn << 48;
6279 break;
6280 case 4:
6281 insn = ld_code4(env, pc) << 32;
6282 break;
6283 case 6:
6284 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6285 break;
6286 default:
6287 g_assert_not_reached();
6290 s->pc_tmp = s->base.pc_next + ilen;
6291 s->ilen = ilen;
6293 /* We can't actually determine the insn format until we've looked up
6294 the full insn opcode, which we can't do without locating the
6295 secondary opcode. Assume by default that OP2 is at bit 40; for
6296 those smaller insns that don't actually have a secondary opcode
6297 this will correctly result in OP2 = 0. */
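/* E.g. for the 0xb2 family the second opcode byte immediately
   follows the first, so (insn << 8) >> 56 isolates it; the RI
   formats keep a 4-bit secondary opcode in bits 12-15, hence
   (insn << 12) >> 60. */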
6298 switch (op) {
6299 case 0x01: /* E */
6300 case 0x80: /* S */
6301 case 0x82: /* S */
6302 case 0x93: /* S */
6303 case 0xb2: /* S, RRF, RRE, IE */
6304 case 0xb3: /* RRE, RRD, RRF */
6305 case 0xb9: /* RRE, RRF */
6306 case 0xe5: /* SSE, SIL */
6307 op2 = (insn << 8) >> 56;
6308 break;
6309 case 0xa5: /* RI */
6310 case 0xa7: /* RI */
6311 case 0xc0: /* RIL */
6312 case 0xc2: /* RIL */
6313 case 0xc4: /* RIL */
6314 case 0xc6: /* RIL */
6315 case 0xc8: /* SSF */
6316 case 0xcc: /* RIL */
6317 op2 = (insn << 12) >> 60;
6318 break;
6319 case 0xc5: /* MII */
6320 case 0xc7: /* SMI */
6321 case 0xd0 ... 0xdf: /* SS */
6322 case 0xe1: /* SS */
6323 case 0xe2: /* SS */
6324 case 0xe8: /* SS */
6325 case 0xe9: /* SS */
6326 case 0xea: /* SS */
6327 case 0xee ... 0xf3: /* SS */
6328 case 0xf8 ... 0xfd: /* SS */
6329 op2 = 0;
6330 break;
6331 default:
6332 op2 = (insn << 40) >> 56;
6333 break;
6336 memset(&s->fields, 0, sizeof(s->fields));
6337 s->fields.raw_insn = insn;
6338 s->fields.op = op;
6339 s->fields.op2 = op2;
6341 /* Look up the instruction. */
6342 info = lookup_opc(op << 8 | op2);
6343 s->insn = info;
6345 /* If we found it, extract the operands. */
6346 if (info != NULL) {
6347 DisasFormat fmt = info->fmt;
6348 int i;
6350 for (i = 0; i < NUM_C_FIELD; ++i) {
6351 extract_field(&s->fields, &format_info[fmt].op[i], insn);
6354 return info;
6357 static bool is_afp_reg(int reg)
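/* Only f0, f2, f4 and f6 are available without the AFP facility. */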
6359 return reg % 2 || reg > 6;
6362 static bool is_fp_pair(int reg)
6364 /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
6365 return !(reg & 0x2);
6368 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6370 const DisasInsn *insn;
6371 DisasJumpType ret = DISAS_NEXT;
6372 DisasOps o = {};
6373 bool icount = false;
6375 /* Search for the insn in the table. */
6376 insn = extract_insn(env, s);
6378 /* Emit insn_start now that we know the ILEN. */
6379 tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);
6381 /* Not found means unimplemented/illegal opcode. */
6382 if (insn == NULL) {
6383 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6384 s->fields.op, s->fields.op2);
6385 gen_illegal_opcode(s);
6386 return DISAS_NORETURN;
6389 #ifndef CONFIG_USER_ONLY
6390 if (s->base.tb->flags & FLAG_MASK_PER) {
6391 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6392 gen_helper_per_ifetch(cpu_env, addr);
6393 tcg_temp_free_i64(addr);
6395 #endif
6397 /* process flags */
6398 if (insn->flags) {
6399 /* privileged instruction */
6400 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6401 gen_program_exception(s, PGM_PRIVILEGED);
6402 return DISAS_NORETURN;
6405 /* if AFP is not enabled, instructions and registers are forbidden */
6406 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6407 uint8_t dxc = 0;
6409 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6410 dxc = 1;
6412 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6413 dxc = 1;
6415 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6416 dxc = 1;
6418 if (insn->flags & IF_BFP) {
6419 dxc = 2;
6421 if (insn->flags & IF_DFP) {
6422 dxc = 3;
6424 if (insn->flags & IF_VEC) {
6425 dxc = 0xfe;
6427 if (dxc) {
6428 gen_data_exception(dxc);
6429 return DISAS_NORETURN;
6433 /* if vector instructions are not enabled, executing them is forbidden */
6434 if (insn->flags & IF_VEC) {
6435 if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6436 gen_data_exception(0xfe);
6437 return DISAS_NORETURN;
6441 /* input/output is the special case for icount mode */
6442 if (unlikely(insn->flags & IF_IO)) {
6443 icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6444 if (icount) {
6445 gen_io_start();
6450 /* Check for insn specification exceptions. */
6451 if (insn->spec) {
6452 if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6453 (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6454 (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6455 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6456 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6457 gen_program_exception(s, PGM_SPECIFICATION);
6458 return DISAS_NORETURN;
6462 /* Implement the instruction. */
6463 if (insn->help_in1) {
6464 insn->help_in1(s, &o);
6466 if (insn->help_in2) {
6467 insn->help_in2(s, &o);
6469 if (insn->help_prep) {
6470 insn->help_prep(s, &o);
6472 if (insn->help_op) {
6473 ret = insn->help_op(s, &o);
6475 if (ret != DISAS_NORETURN) {
6476 if (insn->help_wout) {
6477 insn->help_wout(s, &o);
6479 if (insn->help_cout) {
6480 insn->help_cout(s, &o);
6484 /* Free any temporaries created by the helpers. */
6485 if (o.out && !o.g_out) {
6486 tcg_temp_free_i64(o.out);
6488 if (o.out2 && !o.g_out2) {
6489 tcg_temp_free_i64(o.out2);
6491 if (o.in1 && !o.g_in1) {
6492 tcg_temp_free_i64(o.in1);
6494 if (o.in2 && !o.g_in2) {
6495 tcg_temp_free_i64(o.in2);
6497 if (o.addr1) {
6498 tcg_temp_free_i64(o.addr1);
6501 /* io should be the last instruction in tb when icount is enabled */
6502 if (unlikely(icount && ret == DISAS_NEXT)) {
6503 ret = DISAS_PC_STALE;
6506 #ifndef CONFIG_USER_ONLY
6507 if (s->base.tb->flags & FLAG_MASK_PER) {
6508 /* An exception might be triggered; save PSW if not already done. */
6509 if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6510 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6513 /* Call the helper to check for a possible PER exception. */
6514 gen_helper_per_check_exception(cpu_env);
6516 #endif
6518 /* Advance to the next instruction. */
6519 s->base.pc_next = s->pc_tmp;
6520 return ret;
6523 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6525 DisasContext *dc = container_of(dcbase, DisasContext, base);
6527 /* 31-bit mode */
6528 if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6529 dc->base.pc_first &= 0x7fffffff;
6530 dc->base.pc_next = dc->base.pc_first;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}
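
/*
 * Nothing to set up at TB start; insn_start ops are emitted from
 * translate_one once the instruction length (ILEN) is known, so that
 * hook is intentionally empty as well.
 */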
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Emit an insn_start to accompany the breakpoint exception.
     * The ILEN value is a dummy, since this does not result in
     * an s390x exception, but an internal qemu exception which
     * brings us back to interact with the gdbstub.
     */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}
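
/*
 * Translate one instruction, then decide whether to continue the TB:
 * stop at the guest page boundary so page-level invalidation stays
 * correct, and always stop after an EXECUTE target (nonzero ex_value),
 * which is translated as a single-instruction TB.
 */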
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
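
/*
 * At TB end, flush anything still held lazily (PSW address, condition
 * code state) and choose an exit: DISAS_GOTO_TB has already chained,
 * DISAS_PC_STALE_NOCHAIN must not chain and so exits outright, and the
 * remaining cases may use a TB lookup-and-goto.
 */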
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
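
/*
 * Log the guest instructions for -d in_asm.  An EXECUTE target lives
 * in ex_value rather than in guest memory, so it cannot be
 * disassembled by log_target_disas here.
 */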
static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}
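
/*
 * The data[] slots mirror the three operands of tcg_gen_insn_start:
 * data[0] is the PSW address, data[1] the CC op, and data[2] the ILEN.
 */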
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}