target/s390x/translate.c

/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
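
/*
 * Illustration (not part of the decoder itself): FLD_O_r1, FLD_O_m1,
 * FLD_O_b1, FLD_O_i1 and FLD_O_v1 all map onto compact slot 0 because no
 * instruction format uses more than one of them at a time.  A format with
 * both r1 and i2, for example, stores r1 in c[0] and i2 in c[1], while the
 * presentO bitmap below records which original field names are valid.
 */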

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
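
/*
 * Note: g1/g2 flag the corresponding comparison operand as a global TCG
 * value (e.g. cc_src, cc_op or a guest register) that must not be freed;
 * only operands allocated as temporaries are released by free_compare()
 * below.
 */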

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
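
/*
 * Sketch of the link-info formats handled above: in 64-bit mode the full
 * address is used; in 31-bit mode (FLAG_MASK_32 without FLAG_MASK_64) the
 * high bit of the low word is set (pc | 0x80000000) before being deposited
 * into bits 0-31; in 24-bit mode the raw pc is deposited.  In both
 * non-64-bit cases the upper half of OUT is preserved by the deposit.
 */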

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16-byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16-byte elements, the two 8-byte halves will not form a host
     * int128 if the host is little-endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care.  For operations like addition,
     * the two 8-byte elements have to be loaded separately.  Let's force all
     * 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
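
/*
 * Worked example (illustrative): for reg = 1, enr = 2, es = MO_16 we get
 * bytes = 2 and offs = 4.  On a little-endian host, offs ^= (8 - 2) gives
 * 4 ^ 6 = 2, which is where big-endian halfword element 2 of the diagram
 * above lives within the host representation of vregs[1].
 */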

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
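
/*
 * Example (illustrative): for base register b2 = 5, index x2 = 0 and
 * d2 = -8, this emits regs[5] + (-8) and then, in 31-bit mode, masks the
 * sum with 0x7fffffff so the computed address wraps the way the
 * addressing mode demands.
 */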

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
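
/*
 * Reminder on mask semantics (per the tables above): the four mask bits
 * select CC values 0..3 from left to right, so bit 8 tests CC=0, bit 4
 * tests CC=1, bit 2 tests CC=2 and bit 1 tests CC=3.  For example, after
 * a comparison, mask 8 | 4 ("CC is 0 or 1") maps to TCG_COND_LE via
 * ltgt_cond[12].
 */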

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
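
/*
 * For orientation (a summary, not extra machinery): the translator drives
 * each DisasInsn through these hooks in order - help_in1/help_in2/help_prep
 * load and prepare the operands, help_op performs the operation, and
 * help_wout/help_cout write results back and set the condition code,
 * the latter two being skipped when help_op returns DISAS_NORETURN.
 */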

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
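
/*
 * help_branch below picks between three code-generation strategies,
 * sketched here: (1) both the fallthru and the taken path may use goto_tb,
 * so emit a brcond selecting between two chained exits; (2) only the
 * fallthru may use goto_tb, so store the taken target into psw_addr up
 * front; (3) neither may, so select the next PC with movcond and exit to
 * the main loop.
 */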

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src.  */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0).  */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src.  */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
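
/*
 * Background (illustrative): add-logical sets CC 0/1 when no carry
 * occurred and CC 2/3 when it did, so the carry is exactly the msb of the
 * two-bit CC - hence the shift right by one in the STATIC case above.
 * The CC_OP_SUBU representation of the borrow in cc_src is offset by one
 * from the carry, hence the addi.
 */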

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
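
/*
 * Illustration (values chosen for the example only): s->insn->data packs
 * (size << 8) | shift, as decoded above.  With size = 16 and shift = 48,
 * mask becomes 0xffff000000000000, so only the high halfword participates
 * in the AND and in the CC computation.
 */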

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
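
/*
 * Layout of the 24-bit-mode link word assembled above (bits 31..0 of the
 * low word): instruction-length code in bits 30-31, condition code in
 * bits 28-29, the program mask (extracted from psw_mask) in bits 24-27,
 * and the return address in bits 0-23.
 */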

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
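
/*
 * The returned constant packs m3 into bits 0-3 and m4 into bits 4-7 via
 * deposit32(m3, 4, 4, m4); e.g. m3 = 5 and m4 = 1 yield 0x15.  The fp
 * helpers receiving this value are expected to unpack the two nibbles
 * on the other side.
 */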

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2202 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2204 int d2 = get_field(s, d2);
2205 int b2 = get_field(s, b2);
2206 TCGv_i64 addr, cc;
2208 /* Note that in1 = R3 (new value) and
2209 in2 = (zero-extended) R1 (expected value). */
2211 addr = get_address(s, 0, b2, d2);
2212 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2213 get_mem_index(s), s->insn->data | MO_ALIGN);
2214 tcg_temp_free_i64(addr);
2216 /* Are the memory and expected values (un)equal? Note that this setcond
2217 produces the output CC value, thus the NE sense of the test. */
2218 cc = tcg_temp_new_i64();
2219 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2220 tcg_gen_extrl_i64_i32(cc_op, cc);
2221 tcg_temp_free_i64(cc);
2222 set_cc_static(s);
2224 return DISAS_NEXT;
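/* Illustrative CC derivation (hypothetical values): if memory held 5
   and R1 also held 5, the cmpxchg stores R3 and setcond(NE) yields 0,
   i.e. CC 0 (equal, swap performed).  If memory held 7 instead, nothing
   is stored, o->out becomes 7, and setcond(NE) yields CC 1.  */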
2227 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2229 int r1 = get_field(s, r1);
2230 int r3 = get_field(s, r3);
2231 int d2 = get_field(s, d2);
2232 int b2 = get_field(s, b2);
2233 DisasJumpType ret = DISAS_NEXT;
2234 TCGv_i64 addr;
2235 TCGv_i32 t_r1, t_r3;
2237 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2238 addr = get_address(s, 0, b2, d2);
2239 t_r1 = tcg_const_i32(r1);
2240 t_r3 = tcg_const_i32(r3);
2241 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2242 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2243 } else if (HAVE_CMPXCHG128) {
2244 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2245 } else {
2246 gen_helper_exit_atomic(cpu_env);
2247 ret = DISAS_NORETURN;
2249 tcg_temp_free_i64(addr);
2250 tcg_temp_free_i32(t_r1);
2251 tcg_temp_free_i32(t_r3);
2253 set_cc_static(s);
2254 return ret;
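/* The serial/parallel/fallback split above is the usual pattern for
   16-byte atomics in this file (compare op_lpq below): a plain helper
   when not translating for a parallel context, a cmpxchg128-based
   helper when the host supports it, and otherwise EXCP_ATOMIC so the
   instruction is retried in an exclusive, stop-the-world context.  */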
2257 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2259 int r3 = get_field(s, r3);
2260 TCGv_i32 t_r3 = tcg_const_i32(r3);
2262 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2263 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2264 } else {
2265 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2267 tcg_temp_free_i32(t_r3);
2269 set_cc_static(s);
2270 return DISAS_NEXT;
2273 #ifndef CONFIG_USER_ONLY
2274 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2276 MemOp mop = s->insn->data;
2277 TCGv_i64 addr, old, cc;
2278 TCGLabel *lab = gen_new_label();
2280 /* Note that in1 = R1 (zero-extended expected value),
2281 out = R1 (original reg), out2 = R1+1 (new value). */
2283 addr = tcg_temp_new_i64();
2284 old = tcg_temp_new_i64();
2285 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2286 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2287 get_mem_index(s), mop | MO_ALIGN);
2288 tcg_temp_free_i64(addr);
2290 /* Are the memory and expected values (un)equal? */
2291 cc = tcg_temp_new_i64();
2292 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2293 tcg_gen_extrl_i64_i32(cc_op, cc);
2295 /* Write back the output now, before the following branch,
2296 so that we don't need local temps. */
2297 if ((mop & MO_SIZE) == MO_32) {
2298 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2299 } else {
2300 tcg_gen_mov_i64(o->out, old);
2302 tcg_temp_free_i64(old);
2304 /* If the comparison was equal, and the LSB of R2 was set,
2305 then we need to flush the TLB (for all cpus). */
2306 tcg_gen_xori_i64(cc, cc, 1);
2307 tcg_gen_and_i64(cc, cc, o->in2);
2308 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2309 tcg_temp_free_i64(cc);
2311 gen_helper_purge(cpu_env);
2312 gen_set_label(lab);
2314 return DISAS_NEXT;
2316 #endif
2318 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2320 TCGv_i64 t1 = tcg_temp_new_i64();
2321 TCGv_i32 t2 = tcg_temp_new_i32();
2322 tcg_gen_extrl_i64_i32(t2, o->in1);
2323 gen_helper_cvd(t1, t2);
2324 tcg_temp_free_i32(t2);
2325 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2326 tcg_temp_free_i64(t1);
2327 return DISAS_NEXT;
2330 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2332 int m3 = get_field(s, m3);
2333 TCGLabel *lab = gen_new_label();
2334 TCGCond c;
2336 c = tcg_invert_cond(ltgt_cond[m3]);
2337 if (s->insn->data) {
2338 c = tcg_unsigned_cond(c);
2340 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2342 /* Trap. */
2343 gen_trap(s);
2345 gen_set_label(lab);
2346 return DISAS_NEXT;
2349 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2351 int m3 = get_field(s, m3);
2352 int r1 = get_field(s, r1);
2353 int r2 = get_field(s, r2);
2354 TCGv_i32 tr1, tr2, chk;
2356 /* R1 and R2 must both be even. */
2357 if ((r1 | r2) & 1) {
2358 gen_program_exception(s, PGM_SPECIFICATION);
2359 return DISAS_NORETURN;
2361 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2362 m3 = 0;
2365 tr1 = tcg_const_i32(r1);
2366 tr2 = tcg_const_i32(r2);
2367 chk = tcg_const_i32(m3);
2369 switch (s->insn->data) {
2370 case 12:
2371 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2372 break;
2373 case 14:
2374 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2375 break;
2376 case 21:
2377 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2378 break;
2379 case 24:
2380 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2381 break;
2382 case 41:
2383 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2384 break;
2385 case 42:
2386 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2387 break;
2388 default:
2389 g_assert_not_reached();
2392 tcg_temp_free_i32(tr1);
2393 tcg_temp_free_i32(tr2);
2394 tcg_temp_free_i32(chk);
2395 set_cc_static(s);
2396 return DISAS_NEXT;
2399 #ifndef CONFIG_USER_ONLY
2400 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2402 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2403 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2404 TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2406 gen_helper_diag(cpu_env, r1, r3, func_code);
2408 tcg_temp_free_i32(func_code);
2409 tcg_temp_free_i32(r3);
2410 tcg_temp_free_i32(r1);
2411 return DISAS_NEXT;
2413 #endif
2415 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2417 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2418 return_low128(o->out);
2419 return DISAS_NEXT;
2422 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2424 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2425 return_low128(o->out);
2426 return DISAS_NEXT;
2429 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2431 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2432 return_low128(o->out);
2433 return DISAS_NEXT;
2436 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2438 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2439 return_low128(o->out);
2440 return DISAS_NEXT;
2443 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2445 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2446 return DISAS_NEXT;
2449 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2451 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2452 return DISAS_NEXT;
2455 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2457 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2458 return_low128(o->out2);
2459 return DISAS_NEXT;
2462 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2464 int r2 = get_field(s, r2);
2465 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2466 return DISAS_NEXT;
2469 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2471 /* No cache information provided. */
2472 tcg_gen_movi_i64(o->out, -1);
2473 return DISAS_NEXT;
2476 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2478 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2479 return DISAS_NEXT;
2482 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2484 int r1 = get_field(s, r1);
2485 int r2 = get_field(s, r2);
2486 TCGv_i64 t = tcg_temp_new_i64();
2488 /* Note the "subsequently" in the PoO, which implies a defined result
2489 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2490 tcg_gen_shri_i64(t, psw_mask, 32);
2491 store_reg32_i64(r1, t);
2492 if (r2 != 0) {
2493 store_reg32_i64(r2, psw_mask);
2496 tcg_temp_free_i64(t);
2497 return DISAS_NEXT;
2500 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2502 int r1 = get_field(s, r1);
2503 TCGv_i32 ilen;
2504 TCGv_i64 v1;
2506 /* Nested EXECUTE is not allowed. */
2507 if (unlikely(s->ex_value)) {
2508 gen_program_exception(s, PGM_EXECUTE);
2509 return DISAS_NORETURN;
2512 update_psw_addr(s);
2513 update_cc_op(s);
2515 if (r1 == 0) {
2516 v1 = tcg_const_i64(0);
2517 } else {
2518 v1 = regs[r1];
2521 ilen = tcg_const_i32(s->ilen);
2522 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2523 tcg_temp_free_i32(ilen);
2525 if (r1 == 0) {
2526 tcg_temp_free_i64(v1);
2529 return DISAS_PC_CC_UPDATED;
2532 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2534 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2536 if (!m34) {
2537 return DISAS_NORETURN;
2539 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2540 tcg_temp_free_i32(m34);
2541 return DISAS_NEXT;
2544 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2546 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2548 if (!m34) {
2549 return DISAS_NORETURN;
2551 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2552 tcg_temp_free_i32(m34);
2553 return DISAS_NEXT;
2556 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2558 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2560 if (!m34) {
2561 return DISAS_NORETURN;
2563 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2564 return_low128(o->out2);
2565 tcg_temp_free_i32(m34);
2566 return DISAS_NEXT;
2569 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2571 /* We'll use the original input for cc computation, since we get to
2572 compare that against 0, which ought to be better than comparing
2573 the real output against 64. It also lets cc_dst be a convenient
2574 temporary during our computation. */
2575 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2577 /* R1 = IN ? CLZ(IN) : 64. */
2578 tcg_gen_clzi_i64(o->out, o->in2, 64);
2580 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2581 value by 64, which is undefined. But since the shift is 64 iff the
2582 input is zero, we still get the correct result after and'ing. */
2583 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2584 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2585 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2586 return DISAS_NEXT;
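/* Worked example with an illustrative input: for IN = 0x0010000000000000
   the clz gives R1 = 11, the shifted constant 0x8000000000000000 >> 11
   reproduces exactly the found bit, and the andc against the saved
   input in cc_dst clears it, so R1+1 = 0.  For IN = 0, cc_dst is 0 and
   the andc yields the required 0 despite the undefined shift.  */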
2589 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2591 int m3 = get_field(s, m3);
2592 int pos, len, base = s->insn->data;
2593 TCGv_i64 tmp = tcg_temp_new_i64();
2594 uint64_t ccm;
2596 switch (m3) {
2597 case 0xf:
2598 /* Effectively a 32-bit load. */
2599 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2600 len = 32;
2601 goto one_insert;
2603 case 0xc:
2604 case 0x6:
2605 case 0x3:
2606 /* Effectively a 16-bit load. */
2607 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2608 len = 16;
2609 goto one_insert;
2611 case 0x8:
2612 case 0x4:
2613 case 0x2:
2614 case 0x1:
2615 /* Effectively an 8-bit load. */
2616 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2617 len = 8;
2618 goto one_insert;
2620 one_insert:
2621 pos = base + ctz32(m3) * 8;
2622 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2623 ccm = ((1ull << len) - 1) << pos;
2624 break;
2626 default:
2627 /* This is going to be a sequence of loads and inserts. */
2628 pos = base + 32 - 8;
2629 ccm = 0;
2630 while (m3) {
2631 if (m3 & 0x8) {
2632 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2633 tcg_gen_addi_i64(o->in2, o->in2, 1);
2634 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2635 ccm |= 0xff << pos;
2637 m3 = (m3 << 1) & 0xf;
2638 pos -= 8;
2640 break;
2643 tcg_gen_movi_i64(tmp, ccm);
2644 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2645 tcg_temp_free_i64(tmp);
2646 return DISAS_NEXT;
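/* Worked mask example, assuming base = 0 (ICM rather than ICMH):
   m3 = 0x6 selects the two middle bytes, so ctz32(0x6) = 1 gives
   pos = 8 and a single 16-bit load deposited at bits 8..23;
   ccm = 0xffff00 then restricts the CC computation to exactly the
   bits that were replaced.  */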
2649 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2651 int shift = s->insn->data & 0xff;
2652 int size = s->insn->data >> 8;
2653 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2654 return DISAS_NEXT;
2657 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2659 TCGv_i64 t1, t2;
2661 gen_op_calc_cc(s);
2662 t1 = tcg_temp_new_i64();
2663 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2664 t2 = tcg_temp_new_i64();
2665 tcg_gen_extu_i32_i64(t2, cc_op);
2666 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2667 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2668 tcg_temp_free_i64(t1);
2669 tcg_temp_free_i64(t2);
2670 return DISAS_NEXT;
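/* Layout example with illustrative values: with CC = 2 and a program
   mask of 9, t1 becomes (2 << 4) | 9 = 0x29, deposited as the byte at
   bits 24..31 of R1 (PoO bit positions 32-39); all other bits of R1
   are preserved.  */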
2673 #ifndef CONFIG_USER_ONLY
2674 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2676 TCGv_i32 m4;
2678 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2679 m4 = tcg_const_i32(get_field(s, m4));
2680 } else {
2681 m4 = tcg_const_i32(0);
2683 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2684 tcg_temp_free_i32(m4);
2685 return DISAS_NEXT;
2688 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2690 TCGv_i32 m4;
2692 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2693 m4 = tcg_const_i32(get_field(s, m4));
2694 } else {
2695 m4 = tcg_const_i32(0);
2697 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2698 tcg_temp_free_i32(m4);
2699 return DISAS_NEXT;
2702 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2704 gen_helper_iske(o->out, cpu_env, o->in2);
2705 return DISAS_NEXT;
2707 #endif
2709 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2711 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2712 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2713 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2714 TCGv_i32 t_r1, t_r2, t_r3, type;
2716 switch (s->insn->data) {
2717 case S390_FEAT_TYPE_KMA:
2718 if (r3 == r1 || r3 == r2) {
2719 gen_program_exception(s, PGM_SPECIFICATION);
2720 return DISAS_NORETURN;
2722 /* FALL THROUGH */
2723 case S390_FEAT_TYPE_KMCTR:
2724 if (r3 & 1 || !r3) {
2725 gen_program_exception(s, PGM_SPECIFICATION);
2726 return DISAS_NORETURN;
2728 /* FALL THROUGH */
2729 case S390_FEAT_TYPE_PPNO:
2730 case S390_FEAT_TYPE_KMF:
2731 case S390_FEAT_TYPE_KMC:
2732 case S390_FEAT_TYPE_KMO:
2733 case S390_FEAT_TYPE_KM:
2734 if (r1 & 1 || !r1) {
2735 gen_program_exception(s, PGM_SPECIFICATION);
2736 return DISAS_NORETURN;
2738 /* FALL THROUGH */
2739 case S390_FEAT_TYPE_KMAC:
2740 case S390_FEAT_TYPE_KIMD:
2741 case S390_FEAT_TYPE_KLMD:
2742 if (r2 & 1 || !r2) {
2743 gen_program_exception(s, PGM_SPECIFICATION);
2744 return DISAS_NORETURN;
2746 /* FALL THROUGH */
2747 case S390_FEAT_TYPE_PCKMO:
2748 case S390_FEAT_TYPE_PCC:
2749 break;
2750 default:
2751 g_assert_not_reached();
2754 t_r1 = tcg_const_i32(r1);
2755 t_r2 = tcg_const_i32(r2);
2756 t_r3 = tcg_const_i32(r3);
2757 type = tcg_const_i32(s->insn->data);
2758 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2759 set_cc_static(s);
2760 tcg_temp_free_i32(t_r1);
2761 tcg_temp_free_i32(t_r2);
2762 tcg_temp_free_i32(t_r3);
2763 tcg_temp_free_i32(type);
2764 return DISAS_NEXT;
2767 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2769 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2770 set_cc_static(s);
2771 return DISAS_NEXT;
2774 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2776 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2777 set_cc_static(s);
2778 return DISAS_NEXT;
2781 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2783 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2784 set_cc_static(s);
2785 return DISAS_NEXT;
2788 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2790 /* The real output is indeed the original value in memory;
2791 recompute the addition for the computation of CC. */
2792 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2793 s->insn->data | MO_ALIGN);
2794 /* However, we need to recompute the addition for setting CC. */
2795 tcg_gen_add_i64(o->out, o->in1, o->in2);
2796 return DISAS_NEXT;
2799 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2801 /* The real output is indeed the original value in memory;
2802 recompute the operation for the computation of CC. */
2803 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2804 s->insn->data | MO_ALIGN);
2805 /* However, we need to recompute the operation for setting CC. */
2806 tcg_gen_and_i64(o->out, o->in1, o->in2);
2807 return DISAS_NEXT;
2810 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2812 /* The real output is indeed the original value in memory;
2813 recompute the operation for the computation of CC. */
2814 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2815 s->insn->data | MO_ALIGN);
2816 /* However, we need to recompute the operation for setting CC. */
2817 tcg_gen_or_i64(o->out, o->in1, o->in2);
2818 return DISAS_NEXT;
2821 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2823 /* The real output is indeed the original value in memory;
2824 recompute the operation for the computation of CC. */
2825 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2826 s->insn->data | MO_ALIGN);
2827 /* However, we need to recompute the operation for setting CC. */
2828 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2829 return DISAS_NEXT;
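/* The four LOAD AND ... ops above share one pattern: the atomic
   fetch-op returns the old memory value (the architectural result
   placed in R1), and the operation is then redone non-atomically on
   that old value solely to feed the CC logic; the recomputed result
   equals what the interlocked update already stored.  */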
2832 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2834 gen_helper_ldeb(o->out, cpu_env, o->in2);
2835 return DISAS_NEXT;
2838 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2840 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2842 if (!m34) {
2843 return DISAS_NORETURN;
2845 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2846 tcg_temp_free_i32(m34);
2847 return DISAS_NEXT;
2850 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2852 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2854 if (!m34) {
2855 return DISAS_NORETURN;
2857 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2858 tcg_temp_free_i32(m34);
2859 return DISAS_NEXT;
2862 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2864 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2866 if (!m34) {
2867 return DISAS_NORETURN;
2869 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2870 tcg_temp_free_i32(m34);
2871 return DISAS_NEXT;
2874 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2876 gen_helper_lxdb(o->out, cpu_env, o->in2);
2877 return_low128(o->out2);
2878 return DISAS_NEXT;
2881 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2883 gen_helper_lxeb(o->out, cpu_env, o->in2);
2884 return_low128(o->out2);
2885 return DISAS_NEXT;
2888 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2890 tcg_gen_shli_i64(o->out, o->in2, 32);
2891 return DISAS_NEXT;
2894 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2896 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2897 return DISAS_NEXT;
2900 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2902 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2903 return DISAS_NEXT;
2906 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2908 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2909 return DISAS_NEXT;
2912 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2914 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2915 return DISAS_NEXT;
2918 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2920 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2921 return DISAS_NEXT;
2924 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2926 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2927 return DISAS_NEXT;
2930 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2932 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2933 return DISAS_NEXT;
2936 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2938 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2939 return DISAS_NEXT;
2942 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2944 TCGLabel *lab = gen_new_label();
2945 store_reg32_i64(get_field(s, r1), o->in2);
2946 /* The value is stored even in case of trap. */
2947 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2948 gen_trap(s);
2949 gen_set_label(lab);
2950 return DISAS_NEXT;
2953 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2955 TCGLabel *lab = gen_new_label();
2956 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2957 /* The value is stored even in case of trap. */
2958 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2959 gen_trap(s);
2960 gen_set_label(lab);
2961 return DISAS_NEXT;
2964 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2966 TCGLabel *lab = gen_new_label();
2967 store_reg32h_i64(get_field(s, r1), o->in2);
2968 /* The value is stored even in case of trap. */
2969 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2970 gen_trap(s);
2971 gen_set_label(lab);
2972 return DISAS_NEXT;
2975 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2977 TCGLabel *lab = gen_new_label();
2978 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2979 /* The value is stored even in case of trap. */
2980 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2981 gen_trap(s);
2982 gen_set_label(lab);
2983 return DISAS_NEXT;
2986 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2988 TCGLabel *lab = gen_new_label();
2989 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2990 /* The value is stored even in case of trap. */
2991 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2992 gen_trap(s);
2993 gen_set_label(lab);
2994 return DISAS_NEXT;
2997 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2999 DisasCompare c;
3001 disas_jcc(s, &c, get_field(s, m3));
3003 if (c.is_64) {
3004 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
3005 o->in2, o->in1);
3006 free_compare(&c);
3007 } else {
3008 TCGv_i32 t32 = tcg_temp_new_i32();
3009 TCGv_i64 t, z;
3011 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3012 free_compare(&c);
3014 t = tcg_temp_new_i64();
3015 tcg_gen_extu_i32_i64(t, t32);
3016 tcg_temp_free_i32(t32);
3018 z = tcg_const_i64(0);
3019 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3020 tcg_temp_free_i64(t);
3021 tcg_temp_free_i64(z);
3024 return DISAS_NEXT;
3027 #ifndef CONFIG_USER_ONLY
3028 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3030 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3031 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3032 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3033 tcg_temp_free_i32(r1);
3034 tcg_temp_free_i32(r3);
3035 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3036 return DISAS_PC_STALE_NOCHAIN;
3039 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3041 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3042 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3043 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3044 tcg_temp_free_i32(r1);
3045 tcg_temp_free_i32(r3);
3046 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3047 return DISAS_PC_STALE_NOCHAIN;
3050 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3052 gen_helper_lra(o->out, cpu_env, o->in2);
3053 set_cc_static(s);
3054 return DISAS_NEXT;
3057 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3059 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3060 return DISAS_NEXT;
3063 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3065 TCGv_i64 t1, t2;
3067 per_breaking_event(s);
3069 t1 = tcg_temp_new_i64();
3070 t2 = tcg_temp_new_i64();
3071 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3072 MO_TEUL | MO_ALIGN_8);
3073 tcg_gen_addi_i64(o->in2, o->in2, 4);
3074 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3075 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3076 tcg_gen_shli_i64(t1, t1, 32);
3077 gen_helper_load_psw(cpu_env, t1, t2);
3078 tcg_temp_free_i64(t1);
3079 tcg_temp_free_i64(t2);
3080 return DISAS_NORETURN;
3083 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3085 TCGv_i64 t1, t2;
3087 per_breaking_event(s);
3089 t1 = tcg_temp_new_i64();
3090 t2 = tcg_temp_new_i64();
3091 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3092 MO_TEQ | MO_ALIGN_8);
3093 tcg_gen_addi_i64(o->in2, o->in2, 8);
3094 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3095 gen_helper_load_psw(cpu_env, t1, t2);
3096 tcg_temp_free_i64(t1);
3097 tcg_temp_free_i64(t2);
3098 return DISAS_NORETURN;
3100 #endif
3102 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3104 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3105 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3106 gen_helper_lam(cpu_env, r1, o->in2, r3);
3107 tcg_temp_free_i32(r1);
3108 tcg_temp_free_i32(r3);
3109 return DISAS_NEXT;
3112 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3114 int r1 = get_field(s, r1);
3115 int r3 = get_field(s, r3);
3116 TCGv_i64 t1, t2;
3118 /* Only one register to read. */
3119 t1 = tcg_temp_new_i64();
3120 if (unlikely(r1 == r3)) {
3121 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3122 store_reg32_i64(r1, t1);
3123 tcg_temp_free(t1);
3124 return DISAS_NEXT;
3127 /* First load the values of the first and last registers to trigger
3128 possible page faults. */
3129 t2 = tcg_temp_new_i64();
3130 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3131 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3132 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3133 store_reg32_i64(r1, t1);
3134 store_reg32_i64(r3, t2);
3136 /* Only two registers to read. */
3137 if (((r1 + 1) & 15) == r3) {
3138 tcg_temp_free(t2);
3139 tcg_temp_free(t1);
3140 return DISAS_NEXT;
3143 /* Then load the remaining registers. Page fault can't occur. */
3144 r3 = (r3 - 1) & 15;
3145 tcg_gen_movi_i64(t2, 4);
3146 while (r1 != r3) {
3147 r1 = (r1 + 1) & 15;
3148 tcg_gen_add_i64(o->in2, o->in2, t2);
3149 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3150 store_reg32_i64(r1, t1);
3152 tcg_temp_free(t2);
3153 tcg_temp_free(t1);
3155 return DISAS_NEXT;
3158 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3160 int r1 = get_field(s, r1);
3161 int r3 = get_field(s, r3);
3162 TCGv_i64 t1, t2;
3164 /* Only one register to read. */
3165 t1 = tcg_temp_new_i64();
3166 if (unlikely(r1 == r3)) {
3167 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3168 store_reg32h_i64(r1, t1);
3169 tcg_temp_free(t1);
3170 return DISAS_NEXT;
3173 /* First load the values of the first and last registers to trigger
3174 possible page faults. */
3175 t2 = tcg_temp_new_i64();
3176 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3177 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3178 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3179 store_reg32h_i64(r1, t1);
3180 store_reg32h_i64(r3, t2);
3182 /* Only two registers to read. */
3183 if (((r1 + 1) & 15) == r3) {
3184 tcg_temp_free(t2);
3185 tcg_temp_free(t1);
3186 return DISAS_NEXT;
3189 /* Then load the remaining registers. Page fault can't occur. */
3190 r3 = (r3 - 1) & 15;
3191 tcg_gen_movi_i64(t2, 4);
3192 while (r1 != r3) {
3193 r1 = (r1 + 1) & 15;
3194 tcg_gen_add_i64(o->in2, o->in2, t2);
3195 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3196 store_reg32h_i64(r1, t1);
3198 tcg_temp_free(t2);
3199 tcg_temp_free(t1);
3201 return DISAS_NEXT;
3204 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3206 int r1 = get_field(s, r1);
3207 int r3 = get_field(s, r3);
3208 TCGv_i64 t1, t2;
3210 /* Only one register to read. */
3211 if (unlikely(r1 == r3)) {
3212 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3213 return DISAS_NEXT;
3216 /* First load the values of the first and last registers to trigger
3217 possible page faults. */
3218 t1 = tcg_temp_new_i64();
3219 t2 = tcg_temp_new_i64();
3220 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3221 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3222 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3223 tcg_gen_mov_i64(regs[r1], t1);
3224 tcg_temp_free(t2);
3226 /* Only two registers to read. */
3227 if (((r1 + 1) & 15) == r3) {
3228 tcg_temp_free(t1);
3229 return DISAS_NEXT;
3232 /* Then load the remaining registers. Page fault can't occur. */
3233 r3 = (r3 - 1) & 15;
3234 tcg_gen_movi_i64(t1, 8);
3235 while (r1 != r3) {
3236 r1 = (r1 + 1) & 15;
3237 tcg_gen_add_i64(o->in2, o->in2, t1);
3238 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3240 tcg_temp_free(t1);
3242 return DISAS_NEXT;
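/* Register wraparound example (illustrative): for LMG %r14,%r2,0(%r5)
   the code loads r14 at offset 0 and r2 at offset 8 * ((2 - 14) & 15)
   = 32 first, so any page fault fires before r15..r1 are clobbered;
   the remaining r15, r0 and r1 are then filled from offsets 8..24.  */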
3245 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3247 TCGv_i64 a1, a2;
3248 MemOp mop = s->insn->data;
3250 /* In a parallel context, stop the world and single step. */
3251 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3252 update_psw_addr(s);
3253 update_cc_op(s);
3254 gen_exception(EXCP_ATOMIC);
3255 return DISAS_NORETURN;
3258 /* In a serial context, perform the two loads ... */
3259 a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3260 a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3261 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3262 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3263 tcg_temp_free_i64(a1);
3264 tcg_temp_free_i64(a2);
3266 /* ... and indicate that we performed them while interlocked. */
3267 gen_op_movi_cc(s, 0);
3268 return DISAS_NEXT;
3271 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3273 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3274 gen_helper_lpq(o->out, cpu_env, o->in2);
3275 } else if (HAVE_ATOMIC128) {
3276 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3277 } else {
3278 gen_helper_exit_atomic(cpu_env);
3279 return DISAS_NORETURN;
3281 return_low128(o->out2);
3282 return DISAS_NEXT;
3285 #ifndef CONFIG_USER_ONLY
3286 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3288 tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3289 return DISAS_NEXT;
3291 #endif
3293 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3295 tcg_gen_andi_i64(o->out, o->in2, -256);
3296 return DISAS_NEXT;
3299 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3301 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3303 if (get_field(s, m3) > 6) {
3304 gen_program_exception(s, PGM_SPECIFICATION);
3305 return DISAS_NORETURN;
3308 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3309 tcg_gen_neg_i64(o->addr1, o->addr1);
3310 tcg_gen_movi_i64(o->out, 16);
3311 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3312 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3313 return DISAS_NEXT;
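/* Worked example with illustrative values: for m3 = 0 (64-byte blocks)
   and an address whose low six bits are 0x3a, addr1 | -64 equals -6,
   which negates to the 6 bytes left in the block; umin then caps the
   result at the 16-byte vector register length, so o->out is 6 here
   and 16 whenever the boundary is further away.  */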
3316 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3318 #if !defined(CONFIG_USER_ONLY)
3319 TCGv_i32 i2;
3320 #endif
3321 const uint16_t monitor_class = get_field(s, i2);
3323 if (monitor_class & 0xff00) {
3324 gen_program_exception(s, PGM_SPECIFICATION);
3325 return DISAS_NORETURN;
3328 #if !defined(CONFIG_USER_ONLY)
3329 i2 = tcg_const_i32(monitor_class);
3330 gen_helper_monitor_call(cpu_env, o->addr1, i2);
3331 tcg_temp_free_i32(i2);
3332 #endif
3333 /* Defaults to a NOP. */
3334 return DISAS_NEXT;
3337 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3339 o->out = o->in2;
3340 o->g_out = o->g_in2;
3341 o->in2 = NULL;
3342 o->g_in2 = false;
3343 return DISAS_NEXT;
3346 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3348 int b2 = get_field(s, b2);
3349 TCGv ar1 = tcg_temp_new_i64();
3351 o->out = o->in2;
3352 o->g_out = o->g_in2;
3353 o->in2 = NULL;
3354 o->g_in2 = false;
3356 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3357 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3358 tcg_gen_movi_i64(ar1, 0);
3359 break;
3360 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3361 tcg_gen_movi_i64(ar1, 1);
3362 break;
3363 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3364 if (b2) {
3365 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3366 } else {
3367 tcg_gen_movi_i64(ar1, 0);
3369 break;
3370 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3371 tcg_gen_movi_i64(ar1, 2);
3372 break;
3375 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3376 tcg_temp_free_i64(ar1);
3378 return DISAS_NEXT;
3381 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3383 o->out = o->in1;
3384 o->out2 = o->in2;
3385 o->g_out = o->g_in1;
3386 o->g_out2 = o->g_in2;
3387 o->in1 = NULL;
3388 o->in2 = NULL;
3389 o->g_in1 = o->g_in2 = false;
3390 return DISAS_NEXT;
3393 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3395 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3396 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3397 tcg_temp_free_i32(l);
3398 return DISAS_NEXT;
3401 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3403 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3404 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3405 tcg_temp_free_i32(l);
3406 return DISAS_NEXT;
3409 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3411 int r1 = get_field(s, r1);
3412 int r2 = get_field(s, r2);
3413 TCGv_i32 t1, t2;
3415 /* r1 and r2 must be even. */
3416 if (r1 & 1 || r2 & 1) {
3417 gen_program_exception(s, PGM_SPECIFICATION);
3418 return DISAS_NORETURN;
3421 t1 = tcg_const_i32(r1);
3422 t2 = tcg_const_i32(r2);
3423 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3424 tcg_temp_free_i32(t1);
3425 tcg_temp_free_i32(t2);
3426 set_cc_static(s);
3427 return DISAS_NEXT;
3430 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3432 int r1 = get_field(s, r1);
3433 int r3 = get_field(s, r3);
3434 TCGv_i32 t1, t3;
3436 /* r1 and r3 must be even. */
3437 if (r1 & 1 || r3 & 1) {
3438 gen_program_exception(s, PGM_SPECIFICATION);
3439 return DISAS_NORETURN;
3442 t1 = tcg_const_i32(r1);
3443 t3 = tcg_const_i32(r3);
3444 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3445 tcg_temp_free_i32(t1);
3446 tcg_temp_free_i32(t3);
3447 set_cc_static(s);
3448 return DISAS_NEXT;
3451 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3453 int r1 = get_field(s, r1);
3454 int r3 = get_field(s, r3);
3455 TCGv_i32 t1, t3;
3457 /* r1 and r3 must be even. */
3458 if (r1 & 1 || r3 & 1) {
3459 gen_program_exception(s, PGM_SPECIFICATION);
3460 return DISAS_NORETURN;
3463 t1 = tcg_const_i32(r1);
3464 t3 = tcg_const_i32(r3);
3465 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3466 tcg_temp_free_i32(t1);
3467 tcg_temp_free_i32(t3);
3468 set_cc_static(s);
3469 return DISAS_NEXT;
3472 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3474 int r3 = get_field(s, r3);
3475 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3476 set_cc_static(s);
3477 return DISAS_NEXT;
3480 #ifndef CONFIG_USER_ONLY
3481 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3483 int r1 = get_field(s, l1);
3484 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3485 set_cc_static(s);
3486 return DISAS_NEXT;
3489 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3491 int r1 = get_field(s, l1);
3492 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3493 set_cc_static(s);
3494 return DISAS_NEXT;
3496 #endif
3498 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3500 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3501 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3502 tcg_temp_free_i32(l);
3503 return DISAS_NEXT;
3506 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3508 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3509 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3510 tcg_temp_free_i32(l);
3511 return DISAS_NEXT;
3514 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3516 TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3517 TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3519 gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3520 tcg_temp_free_i32(t1);
3521 tcg_temp_free_i32(t2);
3522 set_cc_static(s);
3523 return DISAS_NEXT;
3526 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3528 TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3529 TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3531 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3532 tcg_temp_free_i32(t1);
3533 tcg_temp_free_i32(t2);
3534 set_cc_static(s);
3535 return DISAS_NEXT;
3538 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3540 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3541 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3542 tcg_temp_free_i32(l);
3543 return DISAS_NEXT;
3546 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3548 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3549 return DISAS_NEXT;
3552 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3554 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3555 return DISAS_NEXT;
3558 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3560 tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3561 return DISAS_NEXT;
3564 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3566 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3567 return DISAS_NEXT;
3570 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3572 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3573 return DISAS_NEXT;
3576 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3578 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3579 return DISAS_NEXT;
3582 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3584 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3585 return_low128(o->out2);
3586 return DISAS_NEXT;
3589 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3591 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3592 return_low128(o->out2);
3593 return DISAS_NEXT;
3596 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3598 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3599 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3600 tcg_temp_free_i64(r3);
3601 return DISAS_NEXT;
3604 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3606 TCGv_i64 r3 = load_freg(get_field(s, r3));
3607 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3608 tcg_temp_free_i64(r3);
3609 return DISAS_NEXT;
3612 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3614 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3615 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3616 tcg_temp_free_i64(r3);
3617 return DISAS_NEXT;
3620 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3622 TCGv_i64 r3 = load_freg(get_field(s, r3));
3623 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3624 tcg_temp_free_i64(r3);
3625 return DISAS_NEXT;
3628 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3630 TCGv_i64 z, n;
3631 z = tcg_const_i64(0);
3632 n = tcg_temp_new_i64();
3633 tcg_gen_neg_i64(n, o->in2);
3634 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3635 tcg_temp_free_i64(n);
3636 tcg_temp_free_i64(z);
3637 return DISAS_NEXT;
3640 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3642 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3643 return DISAS_NEXT;
3646 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3648 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3649 return DISAS_NEXT;
3652 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3654 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3655 tcg_gen_mov_i64(o->out2, o->in2);
3656 return DISAS_NEXT;
3659 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3661 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3662 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3663 tcg_temp_free_i32(l);
3664 set_cc_static(s);
3665 return DISAS_NEXT;
3668 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3670 tcg_gen_neg_i64(o->out, o->in2);
3671 return DISAS_NEXT;
3674 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3676 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3677 return DISAS_NEXT;
3680 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3682 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3683 return DISAS_NEXT;
3686 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3688 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3689 tcg_gen_mov_i64(o->out2, o->in2);
3690 return DISAS_NEXT;
3693 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3695 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3696 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3697 tcg_temp_free_i32(l);
3698 set_cc_static(s);
3699 return DISAS_NEXT;
3702 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3704 tcg_gen_or_i64(o->out, o->in1, o->in2);
3705 return DISAS_NEXT;
3708 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3710 int shift = s->insn->data & 0xff;
3711 int size = s->insn->data >> 8;
3712 uint64_t mask = ((1ull << size) - 1) << shift;
3714 assert(!o->g_in2);
3715 tcg_gen_shli_i64(o->in2, o->in2, shift);
3716 tcg_gen_or_i64(o->out, o->in1, o->in2);
3718 /* Produce the CC from only the bits manipulated. */
3719 tcg_gen_andi_i64(cc_dst, o->out, mask);
3720 set_cc_nz_u64(s, cc_dst);
3721 return DISAS_NEXT;
3724 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3726 o->in1 = tcg_temp_new_i64();
3728 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3729 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3730 } else {
3731 /* Perform the atomic operation in memory. */
3732 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3733 s->insn->data);
3736 /* Recompute the OR for the atomic case as well; needed for setting CC. */
3737 tcg_gen_or_i64(o->out, o->in1, o->in2);
3739 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3740 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3742 return DISAS_NEXT;
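/* Without the interlocked-access facility this is a plain
   load/OR/store sequence; with it, the atomic fetch-or performs the
   update in memory and returns the old value, so the OR is redone on
   that old value only to obtain the result needed for the CC.  */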
3745 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3747 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3748 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3749 tcg_temp_free_i32(l);
3750 return DISAS_NEXT;
3753 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3755 int l2 = get_field(s, l2) + 1;
3756 TCGv_i32 l;
3758 /* The length must not exceed 32 bytes. */
3759 if (l2 > 32) {
3760 gen_program_exception(s, PGM_SPECIFICATION);
3761 return DISAS_NORETURN;
3763 l = tcg_const_i32(l2);
3764 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3765 tcg_temp_free_i32(l);
3766 return DISAS_NEXT;
3769 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3771 int l2 = get_field(s, l2) + 1;
3772 TCGv_i32 l;
3774 /* The length must be even and must not exceed 64 bytes. */
3775 if ((l2 & 1) || (l2 > 64)) {
3776 gen_program_exception(s, PGM_SPECIFICATION);
3777 return DISAS_NORETURN;
3779 l = tcg_const_i32(l2);
3780 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3781 tcg_temp_free_i32(l);
3782 return DISAS_NEXT;
3785 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3787 gen_helper_popcnt(o->out, o->in2);
3788 return DISAS_NEXT;
3791 #ifndef CONFIG_USER_ONLY
3792 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3794 gen_helper_ptlb(cpu_env);
3795 return DISAS_NEXT;
3797 #endif
3799 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3801 int i3 = get_field(s, i3);
3802 int i4 = get_field(s, i4);
3803 int i5 = get_field(s, i5);
3804 int do_zero = i4 & 0x80;
3805 uint64_t mask, imask, pmask;
3806 int pos, len, rot;
3808 /* Adjust the arguments for the specific insn. */
3809 switch (s->fields.op2) {
3810 case 0x55: /* risbg */
3811 case 0x59: /* risbgn */
3812 i3 &= 63;
3813 i4 &= 63;
3814 pmask = ~0;
3815 break;
3816 case 0x5d: /* risbhg */
3817 i3 &= 31;
3818 i4 &= 31;
3819 pmask = 0xffffffff00000000ull;
3820 break;
3821 case 0x51: /* risblg */
3822 i3 = (i3 & 31) + 32;
3823 i4 = (i4 & 31) + 32;
3824 pmask = 0x00000000ffffffffull;
3825 break;
3826 default:
3827 g_assert_not_reached();
3830 /* MASK is the set of bits to be inserted from R2. */
3831 if (i3 <= i4) {
3832 /* [0...i3---i4...63] */
3833 mask = (-1ull >> i3) & (-1ull << (63 - i4));
3834 } else {
3835 /* [0---i4...i3---63] */
3836 mask = (-1ull >> i3) | (-1ull << (63 - i4));
3838 /* For RISBLG/RISBHG, the wrapping is limited to the low/high word. */
3839 mask &= pmask;
3841 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3842 insns, we need to keep the other half of the register. */
3843 imask = ~mask | ~pmask;
3844 if (do_zero) {
3845 imask = ~pmask;
3848 len = i4 - i3 + 1;
3849 pos = 63 - i4;
3850 rot = i5 & 63;
3852 /* In some cases we can implement this with extract. */
3853 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3854 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3855 return DISAS_NEXT;
3858 /* In some cases we can implement this with deposit. */
3859 if (len > 0 && (imask == 0 || ~mask == imask)) {
3860 /* Note that we rotate the bits to be inserted to the lsb, not to
3861 the position as described in the PoO. */
3862 rot = (rot - pos) & 63;
3863 } else {
3864 pos = -1;
3867 /* Rotate the input as necessary. */
3868 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3870 /* Insert the selected bits into the output. */
3871 if (pos >= 0) {
3872 if (imask == 0) {
3873 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3874 } else {
3875 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3877 } else if (imask == 0) {
3878 tcg_gen_andi_i64(o->out, o->in2, mask);
3879 } else {
3880 tcg_gen_andi_i64(o->in2, o->in2, mask);
3881 tcg_gen_andi_i64(o->out, o->out, imask);
3882 tcg_gen_or_i64(o->out, o->out, o->in2);
3884 return DISAS_NEXT;
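/* Mask example with illustrative fields: RISBG with i3 = 40, i4 = 47,
   i5 = 16 selects big-endian bits 40..47, so mask = (-1ull >> 40) &
   (-1ull << 16) = 0x0000000000ff0000 and imask = ~mask.  Since
   ~mask == imask, the deposit path applies: pos = 63 - i4 = 16, the
   rotation folds to (16 - 16) & 63 = 0, and the eight selected bits
   of R2 are deposited into R1 at bit position 16.  */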
3887 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3889 int i3 = get_field(s, i3);
3890 int i4 = get_field(s, i4);
3891 int i5 = get_field(s, i5);
3892 uint64_t mask;
3894 /* If this is a test-only form, arrange to discard the result. */
3895 if (i3 & 0x80) {
3896 o->out = tcg_temp_new_i64();
3897 o->g_out = false;
3900 i3 &= 63;
3901 i4 &= 63;
3902 i5 &= 63;
3904 /* MASK is the set of bits to be operated on from R2.
3905 Beware of I3/I4 wraparound. */
3906 mask = ~0ull >> i3;
3907 if (i3 <= i4) {
3908 mask ^= ~0ull >> i4 >> 1;
3909 } else {
3910 mask |= ~(~0ull >> i4 >> 1);
3913 /* Rotate the input as necessary. */
3914 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3916 /* Operate. */
3917 switch (s->fields.op2) {
3918 case 0x54: /* AND */
3919 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3920 tcg_gen_and_i64(o->out, o->out, o->in2);
3921 break;
3922 case 0x56: /* OR */
3923 tcg_gen_andi_i64(o->in2, o->in2, mask);
3924 tcg_gen_or_i64(o->out, o->out, o->in2);
3925 break;
3926 case 0x57: /* XOR */
3927 tcg_gen_andi_i64(o->in2, o->in2, mask);
3928 tcg_gen_xor_i64(o->out, o->out, o->in2);
3929 break;
3930 default:
3931 abort();
3934 /* Set the CC. */
3935 tcg_gen_andi_i64(cc_dst, o->out, mask);
3936 set_cc_nz_u64(s, cc_dst);
3937 return DISAS_NEXT;
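/* Wraparound example (illustrative): i3 = 60, i4 = 3 selects
   big-endian bits 60..63 and 0..3, giving mask = (~0ull >> 60)
   | ~(~0ull >> 3 >> 1) = 0xf00000000000000f, so the boolean
   operation and the resulting CC touch only the outermost nibbles
   of the register.  */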
3940 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3942 tcg_gen_bswap16_i64(o->out, o->in2);
3943 return DISAS_NEXT;
3946 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3948 tcg_gen_bswap32_i64(o->out, o->in2);
3949 return DISAS_NEXT;
3952 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3954 tcg_gen_bswap64_i64(o->out, o->in2);
3955 return DISAS_NEXT;
3958 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3960 TCGv_i32 t1 = tcg_temp_new_i32();
3961 TCGv_i32 t2 = tcg_temp_new_i32();
3962 TCGv_i32 to = tcg_temp_new_i32();
3963 tcg_gen_extrl_i64_i32(t1, o->in1);
3964 tcg_gen_extrl_i64_i32(t2, o->in2);
3965 tcg_gen_rotl_i32(to, t1, t2);
3966 tcg_gen_extu_i32_i64(o->out, to);
3967 tcg_temp_free_i32(t1);
3968 tcg_temp_free_i32(t2);
3969 tcg_temp_free_i32(to);
3970 return DISAS_NEXT;
3973 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3975 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3976 return DISAS_NEXT;
3979 #ifndef CONFIG_USER_ONLY
3980 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3982 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3983 set_cc_static(s);
3984 return DISAS_NEXT;
3987 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3989 gen_helper_sacf(cpu_env, o->in2);
3990 /* Addressing mode has changed, so end the block. */
3991 return DISAS_PC_STALE;
3993 #endif
3995 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3997 int sam = s->insn->data;
3998 TCGv_i64 tsam;
3999 uint64_t mask;
4001 switch (sam) {
4002 case 0:
4003 mask = 0xffffff;
4004 break;
4005 case 1:
4006 mask = 0x7fffffff;
4007 break;
4008 default:
4009 mask = -1;
4010 break;
4013 /* Bizarre but true, we check the address of the current insn for the
4014 specification exception, not the next to be executed. Thus the PoO
4015 documents that Bad Things Happen two bytes before the end. */
4016 if (s->base.pc_next & ~mask) {
4017 gen_program_exception(s, PGM_SPECIFICATION);
4018 return DISAS_NORETURN;
4020 s->pc_tmp &= mask;
4022 tsam = tcg_const_i64(sam);
4023 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
4024 tcg_temp_free_i64(tsam);
4026 /* Always exit the TB, since we (may have) changed execution mode. */
4027 return DISAS_PC_STALE;
4030 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
4032 int r1 = get_field(s, r1);
4033 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
4034 return DISAS_NEXT;
4037 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4039 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4040 return DISAS_NEXT;
4043 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4045 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4046 return DISAS_NEXT;
4049 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4051 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4052 return_low128(o->out2);
4053 return DISAS_NEXT;
4056 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4058 gen_helper_sqeb(o->out, cpu_env, o->in2);
4059 return DISAS_NEXT;
4062 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4064 gen_helper_sqdb(o->out, cpu_env, o->in2);
4065 return DISAS_NEXT;
4068 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4070 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4071 return_low128(o->out2);
4072 return DISAS_NEXT;
4075 #ifndef CONFIG_USER_ONLY
4076 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4078 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4079 set_cc_static(s);
4080 return DISAS_NEXT;
4083 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4085 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4086 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4087 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4088 set_cc_static(s);
4089 tcg_temp_free_i32(r1);
4090 tcg_temp_free_i32(r3);
4091 return DISAS_NEXT;
4093 #endif
4095 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4097 DisasCompare c;
4098 TCGv_i64 a, h;
4099 TCGLabel *lab;
4100 int r1;
4102 disas_jcc(s, &c, get_field(s, m3));
4104 /* We want to store when the condition is fulfilled, so branch
4105 out when it's not. */
4106 c.cond = tcg_invert_cond(c.cond);
4108 lab = gen_new_label();
4109 if (c.is_64) {
4110 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4111 } else {
4112 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4114 free_compare(&c);
4116 r1 = get_field(s, r1);
4117 a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4118 switch (s->insn->data) {
4119 case 1: /* STOCG */
4120 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4121 break;
4122 case 0: /* STOC */
4123 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4124 break;
4125 case 2: /* STOCFH */
4126 h = tcg_temp_new_i64();
4127 tcg_gen_shri_i64(h, regs[r1], 32);
4128 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4129 tcg_temp_free_i64(h);
4130 break;
4131 default:
4132 g_assert_not_reached();
4134 tcg_temp_free_i64(a);
4136 gen_set_label(lab);
4137 return DISAS_NEXT;
4140 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4142 uint64_t sign = 1ull << s->insn->data;
4143 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4144 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4145 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4146 /* The arithmetic left shift is curious in that it does not affect
4147 the sign bit. Copy that over from the source unchanged. */
4148 tcg_gen_andi_i64(o->out, o->out, ~sign);
4149 tcg_gen_andi_i64(o->in1, o->in1, sign);
4150 tcg_gen_or_i64(o->out, o->out, o->in1);
4151 return DISAS_NEXT;
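/* Sign-preservation example (illustrative, 32-bit case): shifting
   0x40000000 left by one would give 0x80000000, but SLA keeps the
   source sign, so bit 31 is cleared again and the result is 0, with
   the overflow reported through CC_OP_SLA_32; a negative source
   likewise keeps its sign bit whatever is shifted out.  */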
4154 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4156 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4157 return DISAS_NEXT;
4160 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4162 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4163 return DISAS_NEXT;
4166 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4168 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4169 return DISAS_NEXT;
4172 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4174 gen_helper_sfpc(cpu_env, o->in2);
4175 return DISAS_NEXT;
4178 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4180 gen_helper_sfas(cpu_env, o->in2);
4181 return DISAS_NEXT;
4184 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4186 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4187 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4188 gen_helper_srnm(cpu_env, o->addr1);
4189 return DISAS_NEXT;
4192 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4194 /* Bits 0-55 are ignored. */
4195 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4196 gen_helper_srnm(cpu_env, o->addr1);
4197 return DISAS_NEXT;
4200 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4202 TCGv_i64 tmp = tcg_temp_new_i64();
4204 /* Bits other than 61-63 are ignored. */
4205 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4207 /* No need to call a helper; we don't implement DFP. */
4208 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4209 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4210 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4212 tcg_temp_free_i64(tmp);
4213 return DISAS_NEXT;
4216 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4218 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4219 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4220 set_cc_static(s);
4222 tcg_gen_shri_i64(o->in1, o->in1, 24);
4223 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4224 return DISAS_NEXT;
4227 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4229 int b1 = get_field(s, b1);
4230 int d1 = get_field(s, d1);
4231 int b2 = get_field(s, b2);
4232 int d2 = get_field(s, d2);
4233 int r3 = get_field(s, r3);
4234 TCGv_i64 tmp = tcg_temp_new_i64();
4236 /* fetch all operands first */
4237 o->in1 = tcg_temp_new_i64();
4238 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4239 o->in2 = tcg_temp_new_i64();
4240 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4241 o->addr1 = tcg_temp_new_i64();
4242 gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4244 /* load the third operand into r3 before modifying anything */
4245 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4247 /* subtract CPU timer from first operand and store in GR0 */
4248 gen_helper_stpt(tmp, cpu_env);
4249 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4251 /* store second operand in GR1 */
4252 tcg_gen_mov_i64(regs[1], o->in2);
4254 tcg_temp_free_i64(tmp);
4255 return DISAS_NEXT;
4258 #ifndef CONFIG_USER_ONLY
4259 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4261 tcg_gen_shri_i64(o->in2, o->in2, 4);
4262 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4263 return DISAS_NEXT;
4266 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4268 gen_helper_sske(cpu_env, o->in1, o->in2);
4269 return DISAS_NEXT;
4272 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4274 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4275 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4276 return DISAS_PC_STALE_NOCHAIN;
4279 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4281 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4282 return DISAS_NEXT;
4284 #endif
4286 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4288 gen_helper_stck(o->out, cpu_env);
4289 /* ??? We don't implement clock states. */
4290 gen_op_movi_cc(s, 0);
4291 return DISAS_NEXT;
4294 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4296 TCGv_i64 c1 = tcg_temp_new_i64();
4297 TCGv_i64 c2 = tcg_temp_new_i64();
4298 TCGv_i64 todpr = tcg_temp_new_i64();
4299 gen_helper_stck(c1, cpu_env);
4300 /* 16-bit value stored in a uint32_t (only valid bits set) */
4301 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4302 /* Shift the 64-bit value into its place as a zero-extended
4303 104-bit value. Note that "bit positions 64-103 are always
4304 non-zero so that they compare differently to STCK"; we set
4305 the least significant bit to 1. */
4306 tcg_gen_shli_i64(c2, c1, 56);
4307 tcg_gen_shri_i64(c1, c1, 8);
4308 tcg_gen_ori_i64(c2, c2, 0x10000);
4309 tcg_gen_or_i64(c2, c2, todpr);
4310 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4311 tcg_gen_addi_i64(o->in2, o->in2, 8);
4312 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4313 tcg_temp_free_i64(c1);
4314 tcg_temp_free_i64(c2);
4315 tcg_temp_free_i64(todpr);
4316 /* ??? We don't implement clock states. */
4317 gen_op_movi_cc(s, 0);
4318 return DISAS_NEXT;
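/* Sketch of the 16 bytes stored above: the first doubleword is the
   clock shifted right by 8 (a zero epoch byte followed by clock bits
   0-55); the second carries the final clock byte on top, the single
   indicator bit from 0x10000 (the lsb of the zero-extended 104-bit
   value), and the TOD programmable field in the low 16 bits.  */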

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided.  */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
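
/*
 * STCM: each set bit in the m3 mask selects one byte of the register
 * field (leftmost bit = most significant byte); the selected bytes are
 * stored to consecutive addresses.  For example, m3 = 0xa (binary 1010)
 * stores the first and third bytes of the 32-bit field.  Contiguous
 * masks such as 0xf or 0x3 collapse into a single wider store below.
 */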
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
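
/*
 * STM/STMG: store the register range r1..r3 to consecutive locations,
 * wrapping from register 15 back to 0 when r3 < r1; insn->data selects
 * the 4- or 8-byte element size.
 */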
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}

static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}

static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}

static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
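
/*
 * SUBTRACT LOGICAL WITH BORROW computes in1 - in2 - borrow.  Since
 * compute_borrow() leaves the borrow in cc_src as 0 or -1, subtracting
 * the borrow can be expressed as an addition of cc_src.
 */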
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}

static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
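
/*
 * TEST ADDRESSING MODE sets the condition code from the PSW
 * addressing-mode bits: cc 0 = 24-bit, cc 1 = 31-bit, and cc 3 =
 * 64-bit mode (where both mode flags are set).
 */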
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY

static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif

static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
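
/*
 * "XC d(l,b),d(l,b)" with identical operands is the classic s390 idiom
 * for clearing storage, so that case is special-cased below into plain
 * stores of zero instead of a call to the xc helper.
 */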
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

#include "translate_vx.c.inc"

/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}

/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128

/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn. */
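/* Naming convention: suffixes such as _32s/_32u denote sign- or
   zero-extension of a 32-bit register value into a fresh temporary,
   while an _o suffix means the TCG global register itself is used in
   place (g_in1 is set so it is not freed afterwards). */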

static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0

/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn. */
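/* Several of these loaders chain: the in2_m2_* and in2_mri2_* variants
   first compute an address (via in2_a2 or in2_ri2) into o->in2, then
   reuse that same temporary as the destination of the memory load. */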

static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table. */
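
/* Macro parameters, matching the initializer below: OPC = opcode,
   NM = name, FT = format, FC = facility, I1/I2 = input loaders,
   P = prep generator, W = write-out generator, OP = operation,
   CC = condition-code generator, D = per-insn data, FL = flags. */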
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
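
/* For example, a 4-bit r1 field beginning at bit 8 is extracted as
   (insn << 8) >> 60: shift the field to the top of the word, then
   back down to the bottom, zero-extending. */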
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}

/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn. */
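
/* When s->ex_value is set, we are translating the target of an EXECUTE
   instruction: the modified instruction bytes were saved by the EX
   helper, left-aligned, with the ILEN in the low bits. */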
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
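
/* Without the additional-floating-point (AFP) facility enabled, only
   FPRs 0, 2, 4 and 6 are available; everything else counts as an AFP
   register. */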
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}

static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s);

    /* Emit insn_start now that we know the ILEN. */
    tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                return DISAS_NORETURN;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers. */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_PC_STALE;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode: only the low 31 bits of the PSW address are valid */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }
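
    /*
     * tb->cs_base is nonzero iff this TB translates the target of an
     * EXECUTE instruction; translate_one() then reads the insn bytes
     * from ex_value instead of guest memory.
     */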
    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Emit an insn_start to accompany the breakpoint exception.
     * The ILEN value is a dummy, since this does not result in
     * an s390x exception, but an internal qemu exception which
     * brings us back to interact with the gdbstub.
     */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing. */
    dc->base.pc_next += 2;
    return true;
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;
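
        /* A TB may not cross a page boundary, and an EXECUTE target is
           always translated as a single instruction, so end the TB in
           either case. */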
        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
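        /* The TB exit has already been emitted; nothing more to do. */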
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory. */
        qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
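
/* Entry point: called by the common TCG translator code to build one TB. */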
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
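    /* data[] holds the values recorded by tcg_gen_insn_start() in
       translate_one(): data[0] = psw.addr, data[1] = cc_op,
       data[2] = ilen. */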
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date. */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];
}