target/s390x: Implement LOAD/STORE TO REAL ADDRESS inline
[qemu/ar7.git] / target/s390x/translate.c
blob: 4292bb0dd0787f714951ba63644579a470cff8bc
/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
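/*
 * For example, with pc = 0x1234 the link information deposited into the
 * low 32 bits of OUT is 0x00001234 in 24-bit mode and 0x80001234 in
 * 31-bit mode (bit 32 records the addressing mode), while in 64-bit mode
 * OUT simply receives the full 64-bit address.
 */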
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
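/*
 * For example, vec_reg_offset(1, 2, MO_32) addresses word element 2 of
 * vector register 1: offs = 2 * 4 = 8 on a big endian host, and
 * 8 ^ (8 - 4) = 12 on a little endian host, where word 2 lives in the
 * second (high) 8-byte half at byte offset 12.
 */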
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}
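/*
 * Example: a TB translated with DAT disabled performs all of its memory
 * accesses through MMU_REAL_IDX, so e.g.
 *
 *     tcg_gen_qemu_ld64(dest, addr, get_mem_index(s));
 *
 * goes through the real-address TLB; with DAT enabled, the ASC bits of
 * the PSW select the primary, secondary or home address space instead.
 */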
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed. If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
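/*
 * Example of the wrapping semantics: in 24-bit mode, with regs[b2]
 * holding 0x00fffffe and d2 = 4, the computed address is
 * (0x00fffffe + 4) & 0x00ffffff = 0x00000002; in 31-bit mode the sum
 * would be masked with 0x7fffffff instead, and in 64-bit mode no
 * masking is applied at all.
 */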
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
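/*
 * This is the heart of the lazy CC evaluation scheme: arithmetic ops
 * merely record their operands in cc_src/cc_dst/cc_vr together with a
 * cc_op, and only a consumer of the condition code pays for the helper
 * call. For example, after an addition tracked as CC_OP_ADDU_64, a
 * consumer such as IPM would trigger
 *
 *     gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
 *
 * exactly once, after which s->cc_op is CC_OP_STATIC.
 */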
static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
           (tb_cflags(s->base.tb) & CF_LAST_IO) ||
           (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
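/*
 * The four mask bits select the CC values for which the branch is taken:
 * bit 8 <-> cc 0, bit 4 <-> cc 1, bit 2 <-> cc 2, bit 1 <-> cc 3. For a
 * signed comparison (cc 0 = equal, 1 = low, 2 = high), mask 8 | 4 = 12
 * means "branch if equal or low", and indeed
 * ltgt_cond[12] == TCG_COND_LE.
 */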
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }
    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
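/*
 * Example: an operation helper fetches its operands as
 *
 *     int r1 = get_field(s->fields, r1);
 *     int x2 = get_field(s->fields, x2);
 *
 * where get_field(S, r1) expands to get_field1(S, FLD_O_r1, FLD_C_r1),
 * asserting that the field is actually present in the decoded format.
 */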
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
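/*
 * As an illustration, a BXD(2) entry, used for base+index+displacement
 * operands (e.g. the classic RX format), expands to
 *
 *     { 16,  4, 0, FLD_C_b2, FLD_O_b2 },
 *     { 12,  4, 0, FLD_C_x2, FLD_O_x2 },
 *     { 20, 12, 0, FLD_C_d2, FLD_O_d2 }
 *
 * i.e. the base register in bits 16-19, the index register in bits 12-15
 * and the 12-bit displacement in bits 20-31 of the instruction.
 */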
/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
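/*
 * A sketch of how a decoder can enforce these constraints (assuming a
 * "spec" word collected from the insn table):
 *
 *     if ((spec & SPEC_r1_even) && (get_field(s->fields, r1) & 1)) {
 *         gen_program_exception(s, PGM_SPECIFICATION);
 *     }
 */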
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4
/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that
       would create that comparison is 3.  Feeding the generated comparison
       to setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
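/*
 * Why mask 3 above: the branch mask bits map to cc 0..3 from left to
 * right, so mask 3 = 0b0011 selects cc 2 and cc 3. For the logical-add
 * cc ops those are exactly the "carry out" results (cc 0/1 = no carry,
 * cc 2/3 = carry), so the setcond on the comparison built by disas_jcc
 * yields the carry bit as a 0/1 value.
 */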
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
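/*
 * In 24-bit mode the link information assembled above follows the
 * classic BAL layout in the low 32 bits of R1 (bit numbers given left
 * to right):
 *
 *     bits 0-1   instruction length code (s->ilen / 2)
 *     bits 2-3   condition code
 *     bits 4-7   program mask (from the PSW)
 *     bits 8-31  return address (s->pc_tmp)
 *
 * while the high 32 bits of the register are preserved.
 */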
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s->fields, m3);
    uint8_t m4 = get_field(s->fields, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
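/*
 * The rounding mode m3 and the m4 modifier are packed into a single TCG
 * constant: deposit32(m3, 4, 4, m4) places m3 in bits 0-3 and m4 in
 * bits 4-7, so a helper can recover them with extract32(m34, 0, 4) and
 * extract32(m34, 4, 4) respectively.
 */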
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2165 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2167 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2168 TCGv_i32 t1 = tcg_temp_new_i32();
2169 tcg_gen_extrl_i64_i32(t1, o->in1);
2170 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2171 set_cc_static(s);
2172 tcg_temp_free_i32(t1);
2173 tcg_temp_free_i32(m3);
2174 return DISAS_NEXT;
2177 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2179 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2180 set_cc_static(s);
2181 return_low128(o->in2);
2182 return DISAS_NEXT;
2185 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2187 TCGv_i64 t = tcg_temp_new_i64();
2188 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2189 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2190 tcg_gen_or_i64(o->out, o->out, t);
2191 tcg_temp_free_i64(t);
2192 return DISAS_NEXT;
2195 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2197 int d2 = get_field(s->fields, d2);
2198 int b2 = get_field(s->fields, b2);
2199 TCGv_i64 addr, cc;
2201 /* Note that in1 = R3 (new value) and
2202 in2 = (zero-extended) R1 (expected value). */
2204 addr = get_address(s, 0, b2, d2);
2205 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2206 get_mem_index(s), s->insn->data | MO_ALIGN);
2207 tcg_temp_free_i64(addr);
2209 /* Are the memory and expected values (un)equal? Note that this setcond
2210 produces the output CC value, thus the NE sense of the test. */
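/* For reference: the PoO defines CC 0 for CS as "equal, second operand
stored" and CC 1 as "unequal, first operand replaced", so the 0/1
produced by setcond NE is already the final CC value. */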
2211 cc = tcg_temp_new_i64();
2212 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2213 tcg_gen_extrl_i64_i32(cc_op, cc);
2214 tcg_temp_free_i64(cc);
2215 set_cc_static(s);
2217 return DISAS_NEXT;
2220 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2222 int r1 = get_field(s->fields, r1);
2223 int r3 = get_field(s->fields, r3);
2224 int d2 = get_field(s->fields, d2);
2225 int b2 = get_field(s->fields, b2);
2226 DisasJumpType ret = DISAS_NEXT;
2227 TCGv_i64 addr;
2228 TCGv_i32 t_r1, t_r3;
2230 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2231 addr = get_address(s, 0, b2, d2);
2232 t_r1 = tcg_const_i32(r1);
2233 t_r3 = tcg_const_i32(r3);
2234 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2235 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2236 } else if (HAVE_CMPXCHG128) {
2237 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2238 } else {
2239 gen_helper_exit_atomic(cpu_env);
2240 ret = DISAS_NORETURN;
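/* Note: exit_atomic raises EXCP_ATOMIC, which makes the main loop
re-execute this one instruction in an exclusive stop-the-world step
with CF_PARALLEL cleared, so the serial gen_helper_cdsg path is used. */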
2241 }
2242 tcg_temp_free_i64(addr);
2243 tcg_temp_free_i32(t_r1);
2244 tcg_temp_free_i32(t_r3);
2246 set_cc_static(s);
2247 return ret;
2250 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2252 int r3 = get_field(s->fields, r3);
2253 TCGv_i32 t_r3 = tcg_const_i32(r3);
2255 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2256 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2257 } else {
2258 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2259 }
2260 tcg_temp_free_i32(t_r3);
2262 set_cc_static(s);
2263 return DISAS_NEXT;
2266 #ifndef CONFIG_USER_ONLY
2267 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2269 MemOp mop = s->insn->data;
2270 TCGv_i64 addr, old, cc;
2271 TCGLabel *lab = gen_new_label();
2273 /* Note that in1 = R1 (zero-extended expected value),
2274 out = R1 (original reg), out2 = R1+1 (new value). */
2276 addr = tcg_temp_new_i64();
2277 old = tcg_temp_new_i64();
2278 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2279 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2280 get_mem_index(s), mop | MO_ALIGN);
2281 tcg_temp_free_i64(addr);
2283 /* Are the memory and expected values (un)equal? */
2284 cc = tcg_temp_new_i64();
2285 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2286 tcg_gen_extrl_i64_i32(cc_op, cc);
2288 /* Write back the output now, so that it happens before the
2289 following branch, so that we don't need local temps. */
2290 if ((mop & MO_SIZE) == MO_32) {
2291 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2292 } else {
2293 tcg_gen_mov_i64(o->out, old);
2294 }
2295 tcg_temp_free_i64(old);
2297 /* If the comparison was equal, and the LSB of R2 was set,
2298 then we need to flush the TLB (for all cpus). */
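/* Sketch of the test below: after the xori, cc == 1 iff the compare
succeeded; ANDing with o->in2 is nonzero only when the LSB of R2 is
also set, so the EQ-0 branch skips the purge in every other case. */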
2299 tcg_gen_xori_i64(cc, cc, 1);
2300 tcg_gen_and_i64(cc, cc, o->in2);
2301 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2302 tcg_temp_free_i64(cc);
2304 gen_helper_purge(cpu_env);
2305 gen_set_label(lab);
2307 return DISAS_NEXT;
2309 #endif
2311 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2313 TCGv_i64 t1 = tcg_temp_new_i64();
2314 TCGv_i32 t2 = tcg_temp_new_i32();
2315 tcg_gen_extrl_i64_i32(t2, o->in1);
2316 gen_helper_cvd(t1, t2);
2317 tcg_temp_free_i32(t2);
2318 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2319 tcg_temp_free_i64(t1);
2320 return DISAS_NEXT;
2323 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2325 int m3 = get_field(s->fields, m3);
2326 TCGLabel *lab = gen_new_label();
2327 TCGCond c;
2329 c = tcg_invert_cond(ltgt_cond[m3]);
2330 if (s->insn->data) {
2331 c = tcg_unsigned_cond(c);
2332 }
2333 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2335 /* Trap. */
2336 gen_trap(s);
2338 gen_set_label(lab);
2339 return DISAS_NEXT;
2342 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2344 int m3 = get_field(s->fields, m3);
2345 int r1 = get_field(s->fields, r1);
2346 int r2 = get_field(s->fields, r2);
2347 TCGv_i32 tr1, tr2, chk;
2349 /* R1 and R2 must both be even. */
2350 if ((r1 | r2) & 1) {
2351 gen_program_exception(s, PGM_SPECIFICATION);
2352 return DISAS_NORETURN;
2353 }
2354 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2355 m3 = 0;
2356 }
2358 tr1 = tcg_const_i32(r1);
2359 tr2 = tcg_const_i32(r2);
2360 chk = tcg_const_i32(m3);
2362 switch (s->insn->data) {
2363 case 12:
2364 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2365 break;
2366 case 14:
2367 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2368 break;
2369 case 21:
2370 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2371 break;
2372 case 24:
2373 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2374 break;
2375 case 41:
2376 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2377 break;
2378 case 42:
2379 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2380 break;
2381 default:
2382 g_assert_not_reached();
2383 }
2385 tcg_temp_free_i32(tr1);
2386 tcg_temp_free_i32(tr2);
2387 tcg_temp_free_i32(chk);
2388 set_cc_static(s);
2389 return DISAS_NEXT;
2392 #ifndef CONFIG_USER_ONLY
2393 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2395 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2396 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2397 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2399 gen_helper_diag(cpu_env, r1, r3, func_code);
2401 tcg_temp_free_i32(func_code);
2402 tcg_temp_free_i32(r3);
2403 tcg_temp_free_i32(r1);
2404 return DISAS_NEXT;
2406 #endif
2408 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2410 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2411 return_low128(o->out);
2412 return DISAS_NEXT;
2415 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2417 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2418 return_low128(o->out);
2419 return DISAS_NEXT;
2422 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2424 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2425 return_low128(o->out);
2426 return DISAS_NEXT;
2429 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2431 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2432 return_low128(o->out);
2433 return DISAS_NEXT;
2436 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2438 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2439 return DISAS_NEXT;
2442 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2444 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2445 return DISAS_NEXT;
2448 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2450 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2451 return_low128(o->out2);
2452 return DISAS_NEXT;
2455 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2457 int r2 = get_field(s->fields, r2);
2458 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2459 return DISAS_NEXT;
2462 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2464 /* No cache information provided. */
2465 tcg_gen_movi_i64(o->out, -1);
2466 return DISAS_NEXT;
2469 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2471 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2472 return DISAS_NEXT;
2475 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2477 int r1 = get_field(s->fields, r1);
2478 int r2 = get_field(s->fields, r2);
2479 TCGv_i64 t = tcg_temp_new_i64();
2481 /* Note the "subsequently" in the PoO, which implies a defined result
2482 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2483 tcg_gen_shri_i64(t, psw_mask, 32);
2484 store_reg32_i64(r1, t);
2485 if (r2 != 0) {
2486 store_reg32_i64(r2, psw_mask);
2487 }
2489 tcg_temp_free_i64(t);
2490 return DISAS_NEXT;
2493 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2495 int r1 = get_field(s->fields, r1);
2496 TCGv_i32 ilen;
2497 TCGv_i64 v1;
2499 /* Nested EXECUTE is not allowed. */
2500 if (unlikely(s->ex_value)) {
2501 gen_program_exception(s, PGM_EXECUTE);
2502 return DISAS_NORETURN;
2503 }
2505 update_psw_addr(s);
2506 update_cc_op(s);
2508 if (r1 == 0) {
2509 v1 = tcg_const_i64(0);
2510 } else {
2511 v1 = regs[r1];
2512 }
2514 ilen = tcg_const_i32(s->ilen);
2515 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2516 tcg_temp_free_i32(ilen);
2518 if (r1 == 0) {
2519 tcg_temp_free_i64(v1);
2520 }
2522 return DISAS_PC_CC_UPDATED;
2525 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2527 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2529 if (!m34) {
2530 return DISAS_NORETURN;
2531 }
2532 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2533 tcg_temp_free_i32(m34);
2534 return DISAS_NEXT;
2537 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2539 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2541 if (!m34) {
2542 return DISAS_NORETURN;
2543 }
2544 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2545 tcg_temp_free_i32(m34);
2546 return DISAS_NEXT;
2549 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2551 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2553 if (!m34) {
2554 return DISAS_NORETURN;
2555 }
2556 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2557 return_low128(o->out2);
2558 tcg_temp_free_i32(m34);
2559 return DISAS_NEXT;
2562 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2564 /* We'll use the original input for cc computation, since we get to
2565 compare that against 0, which ought to be better than comparing
2566 the real output against 64. It also lets cc_dst be a convenient
2567 temporary during our computation. */
2568 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2570 /* R1 = IN ? CLZ(IN) : 64. */
2571 tcg_gen_clzi_i64(o->out, o->in2, 64);
2573 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2574 value by 64, which is undefined. But since the shift is 64 iff the
2575 input is zero, we still get the correct result after and'ing. */
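/* E.g. IN = 0x10: R1 = clz = 59 and the found bit is
0x8000000000000000 >> 59 = 0x10, so R1+1 = 0x10 & ~0x10 = 0. For
IN = 0 the shift count is 64, but cc_dst is then 0 and the andc
still produces the architected result of 0. */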
2576 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2577 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2578 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2579 return DISAS_NEXT;
2582 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2584 int m3 = get_field(s->fields, m3);
2585 int pos, len, base = s->insn->data;
2586 TCGv_i64 tmp = tcg_temp_new_i64();
2587 uint64_t ccm;
2589 switch (m3) {
2590 case 0xf:
2591 /* Effectively a 32-bit load. */
2592 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2593 len = 32;
2594 goto one_insert;
2596 case 0xc:
2597 case 0x6:
2598 case 0x3:
2599 /* Effectively a 16-bit load. */
2600 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2601 len = 16;
2602 goto one_insert;
2604 case 0x8:
2605 case 0x4:
2606 case 0x2:
2607 case 0x1:
2608 /* Effectively an 8-bit load. */
2609 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2610 len = 8;
2611 goto one_insert;
2613 one_insert:
2614 pos = base + ctz32(m3) * 8;
2615 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2616 ccm = ((1ull << len) - 1) << pos;
2617 break;
2619 default:
2620 /* This is going to be a sequence of loads and inserts. */
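/* E.g. ICM (base 0) with m3 = 0xa loads two successive bytes into
bit positions 24 and 8 of R1 and accumulates ccm = 0xff00ff00 for
the CC computation. */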
2621 pos = base + 32 - 8;
2622 ccm = 0;
2623 while (m3) {
2624 if (m3 & 0x8) {
2625 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2626 tcg_gen_addi_i64(o->in2, o->in2, 1);
2627 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2628 ccm |= 0xffull << pos;
2629 }
2630 m3 = (m3 << 1) & 0xf;
2631 pos -= 8;
2632 }
2633 break;
2634 }
2636 tcg_gen_movi_i64(tmp, ccm);
2637 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2638 tcg_temp_free_i64(tmp);
2639 return DISAS_NEXT;
2642 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2644 int shift = s->insn->data & 0xff;
2645 int size = s->insn->data >> 8;
2646 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2647 return DISAS_NEXT;
2650 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2652 TCGv_i64 t1, t2;
2654 gen_op_calc_cc(s);
2655 t1 = tcg_temp_new_i64();
2656 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2657 t2 = tcg_temp_new_i64();
2658 tcg_gen_extu_i32_i64(t2, cc_op);
2659 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2660 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2661 tcg_temp_free_i64(t1);
2662 tcg_temp_free_i64(t2);
2663 return DISAS_NEXT;
2666 #ifndef CONFIG_USER_ONLY
2667 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2669 TCGv_i32 m4;
2671 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2672 m4 = tcg_const_i32(get_field(s->fields, m4));
2673 } else {
2674 m4 = tcg_const_i32(0);
2675 }
2676 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2677 tcg_temp_free_i32(m4);
2678 return DISAS_NEXT;
2681 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2683 TCGv_i32 m4;
2685 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2686 m4 = tcg_const_i32(get_field(s->fields, m4));
2687 } else {
2688 m4 = tcg_const_i32(0);
2689 }
2690 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2691 tcg_temp_free_i32(m4);
2692 return DISAS_NEXT;
2695 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2697 gen_helper_iske(o->out, cpu_env, o->in2);
2698 return DISAS_NEXT;
2700 #endif
2702 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2704 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2705 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2706 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2707 TCGv_i32 t_r1, t_r2, t_r3, type;
2709 switch (s->insn->data) {
2710 case S390_FEAT_TYPE_KMCTR:
2711 if (r3 & 1 || !r3) {
2712 gen_program_exception(s, PGM_SPECIFICATION);
2713 return DISAS_NORETURN;
2714 }
2715 /* FALL THROUGH */
2716 case S390_FEAT_TYPE_PPNO:
2717 case S390_FEAT_TYPE_KMF:
2718 case S390_FEAT_TYPE_KMC:
2719 case S390_FEAT_TYPE_KMO:
2720 case S390_FEAT_TYPE_KM:
2721 if (r1 & 1 || !r1) {
2722 gen_program_exception(s, PGM_SPECIFICATION);
2723 return DISAS_NORETURN;
2724 }
2725 /* FALL THROUGH */
2726 case S390_FEAT_TYPE_KMAC:
2727 case S390_FEAT_TYPE_KIMD:
2728 case S390_FEAT_TYPE_KLMD:
2729 if (r2 & 1 || !r2) {
2730 gen_program_exception(s, PGM_SPECIFICATION);
2731 return DISAS_NORETURN;
2732 }
2733 /* FALL THROUGH */
2734 case S390_FEAT_TYPE_PCKMO:
2735 case S390_FEAT_TYPE_PCC:
2736 break;
2737 default:
2738 g_assert_not_reached();
2739 }
2741 t_r1 = tcg_const_i32(r1);
2742 t_r2 = tcg_const_i32(r2);
2743 t_r3 = tcg_const_i32(r3);
2744 type = tcg_const_i32(s->insn->data);
2745 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2746 set_cc_static(s);
2747 tcg_temp_free_i32(t_r1);
2748 tcg_temp_free_i32(t_r2);
2749 tcg_temp_free_i32(t_r3);
2750 tcg_temp_free_i32(type);
2751 return DISAS_NEXT;
2754 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2756 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2757 set_cc_static(s);
2758 return DISAS_NEXT;
2761 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2763 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2764 set_cc_static(s);
2765 return DISAS_NEXT;
2768 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2770 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2771 set_cc_static(s);
2772 return DISAS_NEXT;
2775 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2777 /* The real output is indeed the original value in memory,
2778 which the atomic fetch-add below returns in o->in2. */
2779 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2780 s->insn->data | MO_ALIGN);
2781 /* However, we need to recompute the addition for setting CC. */
2782 tcg_gen_add_i64(o->out, o->in1, o->in2);
2783 return DISAS_NEXT;
2786 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2788 /* The real output is indeed the original value in memory,
2789 which the atomic fetch-and below returns in o->in2. */
2790 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2791 s->insn->data | MO_ALIGN);
2792 /* However, we need to recompute the operation for setting CC. */
2793 tcg_gen_and_i64(o->out, o->in1, o->in2);
2794 return DISAS_NEXT;
2797 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2799 /* The real output is indeed the original value in memory,
2800 which the atomic fetch-or below returns in o->in2. */
2801 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2802 s->insn->data | MO_ALIGN);
2803 /* However, we need to recompute the operation for setting CC. */
2804 tcg_gen_or_i64(o->out, o->in1, o->in2);
2805 return DISAS_NEXT;
2808 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2810 /* The real output is indeed the original value in memory,
2811 which the atomic fetch-xor below returns in o->in2. */
2812 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2813 s->insn->data | MO_ALIGN);
2814 /* However, we need to recompute the operation for setting CC. */
2815 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2816 return DISAS_NEXT;
2819 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2821 gen_helper_ldeb(o->out, cpu_env, o->in2);
2822 return DISAS_NEXT;
2825 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2827 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2829 if (!m34) {
2830 return DISAS_NORETURN;
2831 }
2832 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2833 tcg_temp_free_i32(m34);
2834 return DISAS_NEXT;
2837 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2839 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2841 if (!m34) {
2842 return DISAS_NORETURN;
2843 }
2844 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2845 tcg_temp_free_i32(m34);
2846 return DISAS_NEXT;
2849 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2851 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2853 if (!m34) {
2854 return DISAS_NORETURN;
2855 }
2856 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2857 tcg_temp_free_i32(m34);
2858 return DISAS_NEXT;
2861 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2863 gen_helper_lxdb(o->out, cpu_env, o->in2);
2864 return_low128(o->out2);
2865 return DISAS_NEXT;
2868 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2870 gen_helper_lxeb(o->out, cpu_env, o->in2);
2871 return_low128(o->out2);
2872 return DISAS_NEXT;
2875 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2877 tcg_gen_shli_i64(o->out, o->in2, 32);
2878 return DISAS_NEXT;
2881 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2883 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2884 return DISAS_NEXT;
2887 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2889 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2890 return DISAS_NEXT;
2893 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2895 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2896 return DISAS_NEXT;
2899 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2901 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2902 return DISAS_NEXT;
2905 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2907 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2908 return DISAS_NEXT;
2911 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2913 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2914 return DISAS_NEXT;
2917 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2919 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2920 return DISAS_NEXT;
2923 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2925 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2926 return DISAS_NEXT;
2929 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2931 TCGLabel *lab = gen_new_label();
2932 store_reg32_i64(get_field(s->fields, r1), o->in2);
2933 /* The value is stored even in case of trap. */
2934 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2935 gen_trap(s);
2936 gen_set_label(lab);
2937 return DISAS_NEXT;
2940 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2942 TCGLabel *lab = gen_new_label();
2943 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2944 /* The value is stored even in case of trap. */
2945 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2946 gen_trap(s);
2947 gen_set_label(lab);
2948 return DISAS_NEXT;
2951 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2953 TCGLabel *lab = gen_new_label();
2954 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2955 /* The value is stored even in case of trap. */
2956 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2957 gen_trap(s);
2958 gen_set_label(lab);
2959 return DISAS_NEXT;
2962 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2964 TCGLabel *lab = gen_new_label();
2965 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2966 /* The value is stored even in case of trap. */
2967 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2968 gen_trap(s);
2969 gen_set_label(lab);
2970 return DISAS_NEXT;
2973 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2975 TCGLabel *lab = gen_new_label();
2976 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2977 /* The value is stored even in case of trap. */
2978 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2979 gen_trap(s);
2980 gen_set_label(lab);
2981 return DISAS_NEXT;
2984 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2986 DisasCompare c;
2988 disas_jcc(s, &c, get_field(s->fields, m3));
2990 if (c.is_64) {
2991 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2992 o->in2, o->in1);
2993 free_compare(&c);
2994 } else {
2995 TCGv_i32 t32 = tcg_temp_new_i32();
2996 TCGv_i64 t, z;
2998 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2999 free_compare(&c);
3001 t = tcg_temp_new_i64();
3002 tcg_gen_extu_i32_i64(t, t32);
3003 tcg_temp_free_i32(t32);
3005 z = tcg_const_i64(0);
3006 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3007 tcg_temp_free_i64(t);
3008 tcg_temp_free_i64(z);
3009 }
3011 return DISAS_NEXT;
3014 #ifndef CONFIG_USER_ONLY
3015 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3017 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3018 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3019 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3020 tcg_temp_free_i32(r1);
3021 tcg_temp_free_i32(r3);
3022 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3023 return DISAS_PC_STALE_NOCHAIN;
3026 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3028 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3029 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3030 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3031 tcg_temp_free_i32(r1);
3032 tcg_temp_free_i32(r3);
3033 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3034 return DISAS_PC_STALE_NOCHAIN;
3037 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3039 gen_helper_lra(o->out, cpu_env, o->in2);
3040 set_cc_static(s);
3041 return DISAS_NEXT;
3044 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3046 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3047 return DISAS_NEXT;
3050 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3052 TCGv_i64 t1, t2;
3054 per_breaking_event(s);
3056 t1 = tcg_temp_new_i64();
3057 t2 = tcg_temp_new_i64();
3058 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3059 MO_TEUL | MO_ALIGN_8);
3060 tcg_gen_addi_i64(o->in2, o->in2, 4);
3061 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3062 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3063 tcg_gen_shli_i64(t1, t1, 32);
3064 gen_helper_load_psw(cpu_env, t1, t2);
3065 tcg_temp_free_i64(t1);
3066 tcg_temp_free_i64(t2);
3067 return DISAS_NORETURN;
3070 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3072 TCGv_i64 t1, t2;
3074 per_breaking_event(s);
3076 t1 = tcg_temp_new_i64();
3077 t2 = tcg_temp_new_i64();
3078 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3079 MO_TEQ | MO_ALIGN_8);
3080 tcg_gen_addi_i64(o->in2, o->in2, 8);
3081 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3082 gen_helper_load_psw(cpu_env, t1, t2);
3083 tcg_temp_free_i64(t1);
3084 tcg_temp_free_i64(t2);
3085 return DISAS_NORETURN;
3087 #endif
3089 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3091 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3092 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3093 gen_helper_lam(cpu_env, r1, o->in2, r3);
3094 tcg_temp_free_i32(r1);
3095 tcg_temp_free_i32(r3);
3096 return DISAS_NEXT;
3099 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3101 int r1 = get_field(s->fields, r1);
3102 int r3 = get_field(s->fields, r3);
3103 TCGv_i64 t1, t2;
3105 /* Only one register to read. */
3106 t1 = tcg_temp_new_i64();
3107 if (unlikely(r1 == r3)) {
3108 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3109 store_reg32_i64(r1, t1);
3110 tcg_temp_free(t1);
3111 return DISAS_NEXT;
3112 }
3114 /* First load the values of the first and last registers to trigger
3115 possible page faults. */
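/* E.g. r1 = 14, r3 = 1 covers r14, r15, r0, r1; (r3 - r1) & 15 = 3,
so the last register's word lies 12 bytes past the first operand
address. */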
3116 t2 = tcg_temp_new_i64();
3117 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3118 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3119 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3120 store_reg32_i64(r1, t1);
3121 store_reg32_i64(r3, t2);
3123 /* Only two registers to read. */
3124 if (((r1 + 1) & 15) == r3) {
3125 tcg_temp_free(t2);
3126 tcg_temp_free(t1);
3127 return DISAS_NEXT;
3128 }
3130 /* Then load the remaining registers. Page fault can't occur. */
3131 r3 = (r3 - 1) & 15;
3132 tcg_gen_movi_i64(t2, 4);
3133 while (r1 != r3) {
3134 r1 = (r1 + 1) & 15;
3135 tcg_gen_add_i64(o->in2, o->in2, t2);
3136 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3137 store_reg32_i64(r1, t1);
3138 }
3139 tcg_temp_free(t2);
3140 tcg_temp_free(t1);
3142 return DISAS_NEXT;
3145 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3147 int r1 = get_field(s->fields, r1);
3148 int r3 = get_field(s->fields, r3);
3149 TCGv_i64 t1, t2;
3151 /* Only one register to read. */
3152 t1 = tcg_temp_new_i64();
3153 if (unlikely(r1 == r3)) {
3154 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3155 store_reg32h_i64(r1, t1);
3156 tcg_temp_free(t1);
3157 return DISAS_NEXT;
3158 }
3160 /* First load the values of the first and last registers to trigger
3161 possible page faults. */
3162 t2 = tcg_temp_new_i64();
3163 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3164 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3165 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3166 store_reg32h_i64(r1, t1);
3167 store_reg32h_i64(r3, t2);
3169 /* Only two registers to read. */
3170 if (((r1 + 1) & 15) == r3) {
3171 tcg_temp_free(t2);
3172 tcg_temp_free(t1);
3173 return DISAS_NEXT;
3174 }
3176 /* Then load the remaining registers. Page fault can't occur. */
3177 r3 = (r3 - 1) & 15;
3178 tcg_gen_movi_i64(t2, 4);
3179 while (r1 != r3) {
3180 r1 = (r1 + 1) & 15;
3181 tcg_gen_add_i64(o->in2, o->in2, t2);
3182 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3183 store_reg32h_i64(r1, t1);
3184 }
3185 tcg_temp_free(t2);
3186 tcg_temp_free(t1);
3188 return DISAS_NEXT;
3191 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3193 int r1 = get_field(s->fields, r1);
3194 int r3 = get_field(s->fields, r3);
3195 TCGv_i64 t1, t2;
3197 /* Only one register to read. */
3198 if (unlikely(r1 == r3)) {
3199 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3200 return DISAS_NEXT;
3201 }
3203 /* First load the values of the first and last registers to trigger
3204 possible page faults. */
3205 t1 = tcg_temp_new_i64();
3206 t2 = tcg_temp_new_i64();
3207 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3208 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3209 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3210 tcg_gen_mov_i64(regs[r1], t1);
3211 tcg_temp_free(t2);
3213 /* Only two registers to read. */
3214 if (((r1 + 1) & 15) == r3) {
3215 tcg_temp_free(t1);
3216 return DISAS_NEXT;
3217 }
3219 /* Then load the remaining registers. Page fault can't occur. */
3220 r3 = (r3 - 1) & 15;
3221 tcg_gen_movi_i64(t1, 8);
3222 while (r1 != r3) {
3223 r1 = (r1 + 1) & 15;
3224 tcg_gen_add_i64(o->in2, o->in2, t1);
3225 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3226 }
3227 tcg_temp_free(t1);
3229 return DISAS_NEXT;
3232 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3234 TCGv_i64 a1, a2;
3235 MemOp mop = s->insn->data;
3237 /* In a parallel context, stop the world and single step. */
3238 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3239 update_psw_addr(s);
3240 update_cc_op(s);
3241 gen_exception(EXCP_ATOMIC);
3242 return DISAS_NORETURN;
3243 }
3245 /* In a serial context, perform the two loads ... */
3246 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3247 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3248 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3249 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3250 tcg_temp_free_i64(a1);
3251 tcg_temp_free_i64(a2);
3253 /* ... and indicate that we performed them while interlocked. */
3254 gen_op_movi_cc(s, 0);
3255 return DISAS_NEXT;
3258 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3260 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3261 gen_helper_lpq(o->out, cpu_env, o->in2);
3262 } else if (HAVE_ATOMIC128) {
3263 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3264 } else {
3265 gen_helper_exit_atomic(cpu_env);
3266 return DISAS_NORETURN;
3267 }
3268 return_low128(o->out2);
3269 return DISAS_NEXT;
3272 #ifndef CONFIG_USER_ONLY
3273 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3275 o->addr1 = get_address(s, 0, get_field(s->fields, r2), 0);
3276 tcg_gen_qemu_ld_tl(o->out, o->addr1, MMU_REAL_IDX, s->insn->data);
3277 return DISAS_NEXT;
3279 #endif
3281 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3283 tcg_gen_andi_i64(o->out, o->in2, -256);
3284 return DISAS_NEXT;
3287 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3289 const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
3291 if (get_field(s->fields, m3) > 6) {
3292 gen_program_exception(s, PGM_SPECIFICATION);
3293 return DISAS_NORETURN;
3294 }
3296 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3297 tcg_gen_neg_i64(o->addr1, o->addr1);
3298 tcg_gen_movi_i64(o->out, 16);
3299 tcg_gen_umin_i64(o->out, o->out, o->addr1);
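/* In effect: out = MIN(16, block_size - (addr % block_size)), i.e.
the number of bytes loadable before the next block boundary, capped
at the 16-byte vector size. */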
3300 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3301 return DISAS_NEXT;
3304 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3306 o->out = o->in2;
3307 o->g_out = o->g_in2;
3308 o->in2 = NULL;
3309 o->g_in2 = false;
3310 return DISAS_NEXT;
3313 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3315 int b2 = get_field(s->fields, b2);
3316 TCGv ar1 = tcg_temp_new_i64();
3318 o->out = o->in2;
3319 o->g_out = o->g_in2;
3320 o->in2 = NULL;
3321 o->g_in2 = false;
3323 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3324 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3325 tcg_gen_movi_i64(ar1, 0);
3326 break;
3327 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3328 tcg_gen_movi_i64(ar1, 1);
3329 break;
3330 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3331 if (b2) {
3332 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3333 } else {
3334 tcg_gen_movi_i64(ar1, 0);
3335 }
3336 break;
3337 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3338 tcg_gen_movi_i64(ar1, 2);
3339 break;
3340 }
3342 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3343 tcg_temp_free_i64(ar1);
3345 return DISAS_NEXT;
3348 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3350 o->out = o->in1;
3351 o->out2 = o->in2;
3352 o->g_out = o->g_in1;
3353 o->g_out2 = o->g_in2;
3354 o->in1 = NULL;
3355 o->in2 = NULL;
3356 o->g_in1 = o->g_in2 = false;
3357 return DISAS_NEXT;
3360 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3362 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3363 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3364 tcg_temp_free_i32(l);
3365 return DISAS_NEXT;
3368 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3370 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3371 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3372 tcg_temp_free_i32(l);
3373 return DISAS_NEXT;
3376 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3378 int r1 = get_field(s->fields, r1);
3379 int r2 = get_field(s->fields, r2);
3380 TCGv_i32 t1, t2;
3382 /* r1 and r2 must be even. */
3383 if (r1 & 1 || r2 & 1) {
3384 gen_program_exception(s, PGM_SPECIFICATION);
3385 return DISAS_NORETURN;
3386 }
3388 t1 = tcg_const_i32(r1);
3389 t2 = tcg_const_i32(r2);
3390 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3391 tcg_temp_free_i32(t1);
3392 tcg_temp_free_i32(t2);
3393 set_cc_static(s);
3394 return DISAS_NEXT;
3397 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3399 int r1 = get_field(s->fields, r1);
3400 int r3 = get_field(s->fields, r3);
3401 TCGv_i32 t1, t3;
3403 /* r1 and r3 must be even. */
3404 if (r1 & 1 || r3 & 1) {
3405 gen_program_exception(s, PGM_SPECIFICATION);
3406 return DISAS_NORETURN;
3407 }
3409 t1 = tcg_const_i32(r1);
3410 t3 = tcg_const_i32(r3);
3411 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3412 tcg_temp_free_i32(t1);
3413 tcg_temp_free_i32(t3);
3414 set_cc_static(s);
3415 return DISAS_NEXT;
3418 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3420 int r1 = get_field(s->fields, r1);
3421 int r3 = get_field(s->fields, r3);
3422 TCGv_i32 t1, t3;
3424 /* r1 and r3 must be even. */
3425 if (r1 & 1 || r3 & 1) {
3426 gen_program_exception(s, PGM_SPECIFICATION);
3427 return DISAS_NORETURN;
3428 }
3430 t1 = tcg_const_i32(r1);
3431 t3 = tcg_const_i32(r3);
3432 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3433 tcg_temp_free_i32(t1);
3434 tcg_temp_free_i32(t3);
3435 set_cc_static(s);
3436 return DISAS_NEXT;
3439 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3441 int r3 = get_field(s->fields, r3);
3442 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3443 set_cc_static(s);
3444 return DISAS_NEXT;
3447 #ifndef CONFIG_USER_ONLY
3448 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3450 int r1 = get_field(s->fields, l1);
3451 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3452 set_cc_static(s);
3453 return DISAS_NEXT;
3456 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3458 int r1 = get_field(s->fields, l1);
3459 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3460 set_cc_static(s);
3461 return DISAS_NEXT;
3463 #endif
3465 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3467 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3468 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3469 tcg_temp_free_i32(l);
3470 return DISAS_NEXT;
3473 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3475 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3476 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3477 tcg_temp_free_i32(l);
3478 return DISAS_NEXT;
3481 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3483 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3484 set_cc_static(s);
3485 return DISAS_NEXT;
3488 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3490 TCGv_i32 t1 = tcg_const_i32(get_field(s->fields, r1));
3491 TCGv_i32 t2 = tcg_const_i32(get_field(s->fields, r2));
3493 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3494 tcg_temp_free_i32(t1);
3495 tcg_temp_free_i32(t2);
3496 set_cc_static(s);
3497 return DISAS_NEXT;
3500 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3502 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3503 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3504 tcg_temp_free_i32(l);
3505 return DISAS_NEXT;
3508 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3510 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3511 return DISAS_NEXT;
3514 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3516 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3517 return DISAS_NEXT;
3520 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3522 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3523 return DISAS_NEXT;
3526 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3528 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3529 return DISAS_NEXT;
3532 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3534 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3535 return DISAS_NEXT;
3538 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3540 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3541 return_low128(o->out2);
3542 return DISAS_NEXT;
3545 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3547 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3548 return_low128(o->out2);
3549 return DISAS_NEXT;
3552 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3554 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3555 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3556 tcg_temp_free_i64(r3);
3557 return DISAS_NEXT;
3560 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3562 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3563 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3564 tcg_temp_free_i64(r3);
3565 return DISAS_NEXT;
3568 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3570 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3571 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3572 tcg_temp_free_i64(r3);
3573 return DISAS_NEXT;
3576 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3578 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3579 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3580 tcg_temp_free_i64(r3);
3581 return DISAS_NEXT;
3584 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3586 TCGv_i64 z, n;
3587 z = tcg_const_i64(0);
3588 n = tcg_temp_new_i64();
3589 tcg_gen_neg_i64(n, o->in2);
3590 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3591 tcg_temp_free_i64(n);
3592 tcg_temp_free_i64(z);
3593 return DISAS_NEXT;
3596 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3598 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3599 return DISAS_NEXT;
3602 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3604 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3605 return DISAS_NEXT;
3608 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3610 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3611 tcg_gen_mov_i64(o->out2, o->in2);
3612 return DISAS_NEXT;
3615 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3617 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3618 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3619 tcg_temp_free_i32(l);
3620 set_cc_static(s);
3621 return DISAS_NEXT;
3624 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3626 tcg_gen_neg_i64(o->out, o->in2);
3627 return DISAS_NEXT;
3630 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3632 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3633 return DISAS_NEXT;
3636 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3638 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3639 return DISAS_NEXT;
3642 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3644 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3645 tcg_gen_mov_i64(o->out2, o->in2);
3646 return DISAS_NEXT;
3649 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3651 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3652 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3653 tcg_temp_free_i32(l);
3654 set_cc_static(s);
3655 return DISAS_NEXT;
3658 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3660 tcg_gen_or_i64(o->out, o->in1, o->in2);
3661 return DISAS_NEXT;
3664 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3666 int shift = s->insn->data & 0xff;
3667 int size = s->insn->data >> 8;
3668 uint64_t mask = ((1ull << size) - 1) << shift;
3670 assert(!o->g_in2);
3671 tcg_gen_shli_i64(o->in2, o->in2, shift);
3672 tcg_gen_or_i64(o->out, o->in1, o->in2);
3674 /* Produce the CC from only the bits manipulated. */
3675 tcg_gen_andi_i64(cc_dst, o->out, mask);
3676 set_cc_nz_u64(s, cc_dst);
3677 return DISAS_NEXT;
3680 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3682 o->in1 = tcg_temp_new_i64();
3684 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3685 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3686 } else {
3687 /* Perform the atomic operation in memory. */
3688 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3689 s->insn->data);
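/* Note: tcg_gen_atomic_fetch_or_i64 yields the *old* memory value in
o->in1; the new value is never materialized by the atomic op itself. */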
3690 }
3692 /* Recompute also for atomic case: needed for setting CC. */
3693 tcg_gen_or_i64(o->out, o->in1, o->in2);
3695 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3696 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3697 }
3698 return DISAS_NEXT;
3701 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3703 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3704 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3705 tcg_temp_free_i32(l);
3706 return DISAS_NEXT;
3709 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3711 int l2 = get_field(s->fields, l2) + 1;
3712 TCGv_i32 l;
3714 /* The length must not exceed 32 bytes. */
3715 if (l2 > 32) {
3716 gen_program_exception(s, PGM_SPECIFICATION);
3717 return DISAS_NORETURN;
3718 }
3719 l = tcg_const_i32(l2);
3720 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3721 tcg_temp_free_i32(l);
3722 return DISAS_NEXT;
3725 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3727 int l2 = get_field(s->fields, l2) + 1;
3728 TCGv_i32 l;
3730 /* The length must be even and must not exceed 64 bytes. */
3731 if ((l2 & 1) || (l2 > 64)) {
3732 gen_program_exception(s, PGM_SPECIFICATION);
3733 return DISAS_NORETURN;
3734 }
3735 l = tcg_const_i32(l2);
3736 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3737 tcg_temp_free_i32(l);
3738 return DISAS_NEXT;
3741 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3743 gen_helper_popcnt(o->out, o->in2);
3744 return DISAS_NEXT;
3747 #ifndef CONFIG_USER_ONLY
3748 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3750 gen_helper_ptlb(cpu_env);
3751 return DISAS_NEXT;
3753 #endif
3755 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3757 int i3 = get_field(s->fields, i3);
3758 int i4 = get_field(s->fields, i4);
3759 int i5 = get_field(s->fields, i5);
3760 int do_zero = i4 & 0x80;
3761 uint64_t mask, imask, pmask;
3762 int pos, len, rot;
3764 /* Adjust the arguments for the specific insn. */
3765 switch (s->fields->op2) {
3766 case 0x55: /* risbg */
3767 case 0x59: /* risbgn */
3768 i3 &= 63;
3769 i4 &= 63;
3770 pmask = ~0;
3771 break;
3772 case 0x5d: /* risbhg */
3773 i3 &= 31;
3774 i4 &= 31;
3775 pmask = 0xffffffff00000000ull;
3776 break;
3777 case 0x51: /* risblg */
3778 i3 &= 31;
3779 i4 &= 31;
3780 pmask = 0x00000000ffffffffull;
3781 break;
3782 default:
3783 g_assert_not_reached();
3784 }
3786 /* MASK is the set of bits to be inserted from R2.
3787 Take care for I3/I4 wraparound. */
3788 mask = pmask >> i3;
3789 if (i3 <= i4) {
3790 mask ^= pmask >> i4 >> 1;
3791 } else {
3792 mask |= ~(pmask >> i4 >> 1);
3793 }
3794 mask &= pmask;
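/* E.g. i3 = 8, i4 = 15 with pmask = ~0 gives mask = (~0ull >> 8)
^ (~0ull >> 16) = 0x00ff000000000000, selecting bits 8-15 in the
PoO's MSB-first numbering; the i3 > i4 branch instead produces a
wraparound mask covering both ends of the register. */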
3796 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3797 insns, we need to keep the other half of the register. */
3798 imask = ~mask | ~pmask;
3799 if (do_zero) {
3800 imask = ~pmask;
3801 }
3803 len = i4 - i3 + 1;
3804 pos = 63 - i4;
3805 rot = i5 & 63;
3806 if (s->fields->op2 == 0x5d) {
3807 pos += 32;
3808 }
3810 /* In some cases we can implement this with extract. */
3811 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3812 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3813 return DISAS_NEXT;
3814 }
3816 /* In some cases we can implement this with deposit. */
3817 if (len > 0 && (imask == 0 || ~mask == imask)) {
3818 /* Note that we rotate the bits to be inserted to the lsb, not to
3819 the position as described in the PoO. */
3820 rot = (rot - pos) & 63;
3821 } else {
3822 pos = -1;
3823 }
3825 /* Rotate the input as necessary. */
3826 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3828 /* Insert the selected bits into the output. */
3829 if (pos >= 0) {
3830 if (imask == 0) {
3831 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3832 } else {
3833 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3834 }
3835 } else if (imask == 0) {
3836 tcg_gen_andi_i64(o->out, o->in2, mask);
3837 } else {
3838 tcg_gen_andi_i64(o->in2, o->in2, mask);
3839 tcg_gen_andi_i64(o->out, o->out, imask);
3840 tcg_gen_or_i64(o->out, o->out, o->in2);
3841 }
3842 return DISAS_NEXT;
3845 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3847 int i3 = get_field(s->fields, i3);
3848 int i4 = get_field(s->fields, i4);
3849 int i5 = get_field(s->fields, i5);
3850 uint64_t mask;
3852 /* If this is a test-only form, arrange to discard the result. */
3853 if (i3 & 0x80) {
3854 o->out = tcg_temp_new_i64();
3855 o->g_out = false;
3856 }
3858 i3 &= 63;
3859 i4 &= 63;
3860 i5 &= 63;
3862 /* MASK is the set of bits to be operated on from R2.
3863 Take care for I3/I4 wraparound. */
3864 mask = ~0ull >> i3;
3865 if (i3 <= i4) {
3866 mask ^= ~0ull >> i4 >> 1;
3867 } else {
3868 mask |= ~(~0ull >> i4 >> 1);
3869 }
3871 /* Rotate the input as necessary. */
3872 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3874 /* Operate. */
3875 switch (s->fields->op2) {
3876 case 0x55: /* AND */
3877 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3878 tcg_gen_and_i64(o->out, o->out, o->in2);
3879 break;
3880 case 0x56: /* OR */
3881 tcg_gen_andi_i64(o->in2, o->in2, mask);
3882 tcg_gen_or_i64(o->out, o->out, o->in2);
3883 break;
3884 case 0x57: /* XOR */
3885 tcg_gen_andi_i64(o->in2, o->in2, mask);
3886 tcg_gen_xor_i64(o->out, o->out, o->in2);
3887 break;
3888 default:
3889 abort();
3890 }
3892 /* Set the CC. */
3893 tcg_gen_andi_i64(cc_dst, o->out, mask);
3894 set_cc_nz_u64(s, cc_dst);
3895 return DISAS_NEXT;
3898 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3900 tcg_gen_bswap16_i64(o->out, o->in2);
3901 return DISAS_NEXT;
3904 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3906 tcg_gen_bswap32_i64(o->out, o->in2);
3907 return DISAS_NEXT;
3910 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3912 tcg_gen_bswap64_i64(o->out, o->in2);
3913 return DISAS_NEXT;
3916 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3918 TCGv_i32 t1 = tcg_temp_new_i32();
3919 TCGv_i32 t2 = tcg_temp_new_i32();
3920 TCGv_i32 to = tcg_temp_new_i32();
3921 tcg_gen_extrl_i64_i32(t1, o->in1);
3922 tcg_gen_extrl_i64_i32(t2, o->in2);
3923 tcg_gen_rotl_i32(to, t1, t2);
3924 tcg_gen_extu_i32_i64(o->out, to);
3925 tcg_temp_free_i32(t1);
3926 tcg_temp_free_i32(t2);
3927 tcg_temp_free_i32(to);
3928 return DISAS_NEXT;
3931 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3933 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3934 return DISAS_NEXT;
3937 #ifndef CONFIG_USER_ONLY
3938 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3940 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3941 set_cc_static(s);
3942 return DISAS_NEXT;
3945 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3947 gen_helper_sacf(cpu_env, o->in2);
3948 /* Addressing mode has changed, so end the block. */
3949 return DISAS_PC_STALE;
3951 #endif
3953 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3955 int sam = s->insn->data;
3956 TCGv_i64 tsam;
3957 uint64_t mask;
3959 switch (sam) {
3960 case 0:
3961 mask = 0xffffff;
3962 break;
3963 case 1:
3964 mask = 0x7fffffff;
3965 break;
3966 default:
3967 mask = -1;
3968 break;
3969 }
3971 /* Bizarre but true, we check the address of the current insn for the
3972 specification exception, not the next to be executed. Thus the PoO
3973 documents that Bad Things Happen two bytes before the end. */
3974 if (s->base.pc_next & ~mask) {
3975 gen_program_exception(s, PGM_SPECIFICATION);
3976 return DISAS_NORETURN;
3977 }
3978 s->pc_tmp &= mask;
3980 tsam = tcg_const_i64(sam);
3981 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3982 tcg_temp_free_i64(tsam);
3984 /* Always exit the TB, since we (may have) changed execution mode. */
3985 return DISAS_PC_STALE;
3988 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3990 int r1 = get_field(s->fields, r1);
3991 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3992 return DISAS_NEXT;
3995 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3997 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3998 return DISAS_NEXT;
4001 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4003 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4004 return DISAS_NEXT;
4007 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4009 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4010 return_low128(o->out2);
4011 return DISAS_NEXT;
4014 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4016 gen_helper_sqeb(o->out, cpu_env, o->in2);
4017 return DISAS_NEXT;
4020 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4022 gen_helper_sqdb(o->out, cpu_env, o->in2);
4023 return DISAS_NEXT;
4026 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4028 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4029 return_low128(o->out2);
4030 return DISAS_NEXT;
4033 #ifndef CONFIG_USER_ONLY
4034 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4036 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4037 set_cc_static(s);
4038 return DISAS_NEXT;
4041 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4043 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4044 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4045 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4046 set_cc_static(s);
4047 tcg_temp_free_i32(r1);
4048 tcg_temp_free_i32(r3);
4049 return DISAS_NEXT;
4051 #endif
4053 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4055 DisasCompare c;
4056 TCGv_i64 a, h;
4057 TCGLabel *lab;
4058 int r1;
4060 disas_jcc(s, &c, get_field(s->fields, m3));
4062 /* We want to store when the condition is fulfilled, so branch
4063 out when it's not. */
4064 c.cond = tcg_invert_cond(c.cond);
4066 lab = gen_new_label();
4067 if (c.is_64) {
4068 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4069 } else {
4070 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4071 }
4072 free_compare(&c);
4074 r1 = get_field(s->fields, r1);
4075 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
4076 switch (s->insn->data) {
4077 case 1: /* STOCG */
4078 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4079 break;
4080 case 0: /* STOC */
4081 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4082 break;
4083 case 2: /* STOCFH */
4084 h = tcg_temp_new_i64();
4085 tcg_gen_shri_i64(h, regs[r1], 32);
4086 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4087 tcg_temp_free_i64(h);
4088 break;
4089 default:
4090 g_assert_not_reached();
4091 }
4092 tcg_temp_free_i64(a);
4094 gen_set_label(lab);
4095 return DISAS_NEXT;
4098 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4100 uint64_t sign = 1ull << s->insn->data;
4101 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4102 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4103 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4104 /* The arithmetic left shift is curious in that it does not affect
4105 the sign bit. Copy that over from the source unchanged. */
4106 tcg_gen_andi_i64(o->out, o->out, ~sign);
4107 tcg_gen_andi_i64(o->in1, o->in1, sign);
4108 tcg_gen_or_i64(o->out, o->out, o->in1);
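/* E.g. 32-bit SLA (data = 31, sign = 1 << 31) of 0x80000001 by 1:
the raw shift leaves 0x00000002 in the low word; the andi/or pair
reinstates the sign bit, yielding the architected 0x80000002. */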
4109 return DISAS_NEXT;
4112 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4114 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4115 return DISAS_NEXT;
4118 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4120 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4121 return DISAS_NEXT;
4124 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4126 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4127 return DISAS_NEXT;
4130 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4132 gen_helper_sfpc(cpu_env, o->in2);
4133 return DISAS_NEXT;
4136 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4138 gen_helper_sfas(cpu_env, o->in2);
4139 return DISAS_NEXT;
4142 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4144 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4145 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4146 gen_helper_srnm(cpu_env, o->addr1);
4147 return DISAS_NEXT;
4150 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4152 /* Bits 0-55 are ignored. */
4153 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4154 gen_helper_srnm(cpu_env, o->addr1);
4155 return DISAS_NEXT;
4158 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4160 TCGv_i64 tmp = tcg_temp_new_i64();
4162 /* Bits other than 61-63 are ignored. */
4163 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4165 /* No need to call a helper, we don't implement DFP. */
4166 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4167 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4168 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4170 tcg_temp_free_i64(tmp);
4171 return DISAS_NEXT;
4174 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4176 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4177 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4178 set_cc_static(s);
4180 tcg_gen_shri_i64(o->in1, o->in1, 24);
4181 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4182 return DISAS_NEXT;
4185 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4187 int b1 = get_field(s->fields, b1);
4188 int d1 = get_field(s->fields, d1);
4189 int b2 = get_field(s->fields, b2);
4190 int d2 = get_field(s->fields, d2);
4191 int r3 = get_field(s->fields, r3);
4192 TCGv_i64 tmp = tcg_temp_new_i64();
4194 /* fetch all operands first */
4195 o->in1 = tcg_temp_new_i64();
4196 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4197 o->in2 = tcg_temp_new_i64();
4198 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4199 o->addr1 = get_address(s, 0, r3, 0);
4201 /* load the third operand into r3 before modifying anything */
4202 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4204 /* subtract CPU timer from first operand and store in GR0 */
4205 gen_helper_stpt(tmp, cpu_env);
4206 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4208 /* store second operand in GR1 */
4209 tcg_gen_mov_i64(regs[1], o->in2);
4211 tcg_temp_free_i64(tmp);
4212 return DISAS_NEXT;
4215 #ifndef CONFIG_USER_ONLY
4216 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4218 tcg_gen_shri_i64(o->in2, o->in2, 4);
4219 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4220 return DISAS_NEXT;
4223 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4225 gen_helper_sske(cpu_env, o->in1, o->in2);
4226 return DISAS_NEXT;
4229 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4231 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4232 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4233 return DISAS_PC_STALE_NOCHAIN;
4236 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4238 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4239 return DISAS_NEXT;
4241 #endif
4243 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4245 gen_helper_stck(o->out, cpu_env);
4246 /* ??? We don't implement clock states. */
4247 gen_op_movi_cc(s, 0);
4248 return DISAS_NEXT;
4251 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4253 TCGv_i64 c1 = tcg_temp_new_i64();
4254 TCGv_i64 c2 = tcg_temp_new_i64();
4255 TCGv_i64 todpr = tcg_temp_new_i64();
4256 gen_helper_stck(c1, cpu_env);
4257 /* 16-bit value stored in a uint32_t (only valid bits set). */
4258 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4259 /* Shift the 64-bit value into its place as a zero-extended
4260 104-bit value. Note that "bit positions 64-103 are always
4261 non-zero so that they compare differently to STCK"; we set
4262 the least significant bit to 1. */
4263 tcg_gen_shli_i64(c2, c1, 56);
4264 tcg_gen_shri_i64(c1, c1, 8);
4265 tcg_gen_ori_i64(c2, c2, 0x10000);
4266 tcg_gen_or_i64(c2, c2, todpr);
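/* Resulting 16-byte layout: byte 0 = zero epoch index, bytes 1-8 =
the 64-bit clock, bytes 9-12 = 0, byte 13 = 0x01 (the forced nonzero
bit), bytes 14-15 = the TOD programmable field. */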
4267 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4268 tcg_gen_addi_i64(o->in2, o->in2, 8);
4269 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4270 tcg_temp_free_i64(c1);
4271 tcg_temp_free_i64(c2);
4272 tcg_temp_free_i64(todpr);
4273 /* ??? We don't implement clock states. */
4274 gen_op_movi_cc(s, 0);
4275 return DISAS_NEXT;
4278 #ifndef CONFIG_USER_ONLY
4279 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4281 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4282 gen_helper_sck(cc_op, cpu_env, o->in1);
4283 set_cc_static(s);
4284 return DISAS_NEXT;
4287 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4289 gen_helper_sckc(cpu_env, o->in2);
4290 return DISAS_NEXT;
4293 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4295 gen_helper_sckpf(cpu_env, regs[0]);
4296 return DISAS_NEXT;
4299 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4301 gen_helper_stckc(o->out, cpu_env);
4302 return DISAS_NEXT;
4305 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4307 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4308 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4309 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4310 tcg_temp_free_i32(r1);
4311 tcg_temp_free_i32(r3);
4312 return DISAS_NEXT;
4315 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4317 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4318 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4319 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4320 tcg_temp_free_i32(r1);
4321 tcg_temp_free_i32(r3);
4322 return DISAS_NEXT;
4325 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4327 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4328 return DISAS_NEXT;
4331 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4333 gen_helper_spt(cpu_env, o->in2);
4334 return DISAS_NEXT;
4337 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4339 gen_helper_stfl(cpu_env);
4340 return DISAS_NEXT;
4343 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4345 gen_helper_stpt(o->out, cpu_env);
4346 return DISAS_NEXT;
4349 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4351 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4352 set_cc_static(s);
4353 return DISAS_NEXT;
4356 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4358 gen_helper_spx(cpu_env, o->in2);
4359 return DISAS_NEXT;
4362 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4364 gen_helper_xsch(cpu_env, regs[1]);
4365 set_cc_static(s);
4366 return DISAS_NEXT;
4369 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4371 gen_helper_csch(cpu_env, regs[1]);
4372 set_cc_static(s);
4373 return DISAS_NEXT;
4376 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4378 gen_helper_hsch(cpu_env, regs[1]);
4379 set_cc_static(s);
4380 return DISAS_NEXT;
4383 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4385 gen_helper_msch(cpu_env, regs[1], o->in2);
4386 set_cc_static(s);
4387 return DISAS_NEXT;
4390 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4392 gen_helper_rchp(cpu_env, regs[1]);
4393 set_cc_static(s);
4394 return DISAS_NEXT;
4397 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4399 gen_helper_rsch(cpu_env, regs[1]);
4400 set_cc_static(s);
4401 return DISAS_NEXT;
4404 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4406 gen_helper_sal(cpu_env, regs[1]);
4407 return DISAS_NEXT;
4410 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4412 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4413 return DISAS_NEXT;
4416 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4418 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4419 gen_op_movi_cc(s, 3);
4420 return DISAS_NEXT;
4423 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4425 /* The instruction is suppressed if not provided. */
4426 return DISAS_NEXT;
4429 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4431 gen_helper_ssch(cpu_env, regs[1], o->in2);
4432 set_cc_static(s);
4433 return DISAS_NEXT;
4436 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4438 gen_helper_stsch(cpu_env, regs[1], o->in2);
4439 set_cc_static(s);
4440 return DISAS_NEXT;
4443 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4445 gen_helper_stcrw(cpu_env, o->in2);
4446 set_cc_static(s);
4447 return DISAS_NEXT;
4450 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4452 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4453 set_cc_static(s);
4454 return DISAS_NEXT;
4457 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4459 gen_helper_tsch(cpu_env, regs[1], o->in2);
4460 set_cc_static(s);
4461 return DISAS_NEXT;
4464 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4466 gen_helper_chsc(cpu_env, o->in2);
4467 set_cc_static(s);
4468 return DISAS_NEXT;
4471 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4473 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4474 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4475 return DISAS_NEXT;
4478 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4480 uint64_t i2 = get_field(s->fields, i2);
4481 TCGv_i64 t;
4483 /* It is important to do what the instruction name says: STORE THEN.
4484 If we let the output hook perform the store, then if we fault and
4485 restart we'll have the wrong SYSTEM MASK in place. */
4486 t = tcg_temp_new_i64();
4487 tcg_gen_shri_i64(t, psw_mask, 56);
4488 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4489 tcg_temp_free_i64(t);
4491 if (s->fields->op == 0xac) {
4492 tcg_gen_andi_i64(psw_mask, psw_mask,
4493 (i2 << 56) | 0x00ffffffffffffffull);
4494 } else {
4495 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4498 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4499 return DISAS_PC_STALE_NOCHAIN;
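/*
 * Worked example (a sketch): STNSM d1(b1),0xFC first stores the current
 * system-mask byte (psw_mask bits 0-7) to memory, then ANDs 0xFC into
 * that byte, clearing PSW bits 6 and 7 (the I/O and external interrupt
 * masks).  STOSM, the other opcode handled here, ORs instead, so
 * STOSM d1(b1),0x03 turns those bits back on.
 */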
4502 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4504 o->addr1 = get_address(s, 0, get_field(s->fields, r2), 0);
4505 tcg_gen_qemu_st_tl(o->in1, o->addr1, MMU_REAL_IDX, s->insn->data);
4507 if (s->base.tb->flags & FLAG_MASK_PER) {
4508 update_psw_addr(s);
4509 gen_helper_per_store_real(cpu_env);
4511 return DISAS_NEXT;
4513 #endif
4515 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4517 gen_helper_stfle(cc_op, cpu_env, o->in2);
4518 set_cc_static(s);
4519 return DISAS_NEXT;
4522 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4524 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4525 return DISAS_NEXT;
4528 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4530 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4531 return DISAS_NEXT;
4534 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4536 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4537 return DISAS_NEXT;
4540 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4542 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4543 return DISAS_NEXT;
4546 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4548 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4549 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4550 gen_helper_stam(cpu_env, r1, o->in2, r3);
4551 tcg_temp_free_i32(r1);
4552 tcg_temp_free_i32(r3);
4553 return DISAS_NEXT;
4556 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4558 int m3 = get_field(s->fields, m3);
4559 int pos, base = s->insn->data;
4560 TCGv_i64 tmp = tcg_temp_new_i64();
4562 pos = base + ctz32(m3) * 8;
4563 switch (m3) {
4564 case 0xf:
4565 /* Effectively a 32-bit store. */
4566 tcg_gen_shri_i64(tmp, o->in1, pos);
4567 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4568 break;
4570 case 0xc:
4571 case 0x6:
4572 case 0x3:
4573 /* Effectively a 16-bit store. */
4574 tcg_gen_shri_i64(tmp, o->in1, pos);
4575 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4576 break;
4578 case 0x8:
4579 case 0x4:
4580 case 0x2:
4581 case 0x1:
4582 /* Effectively an 8-bit store. */
4583 tcg_gen_shri_i64(tmp, o->in1, pos);
4584 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4585 break;
4587 default:
4588 /* This is going to be a sequence of shifts and stores. */
4589 pos = base + 32 - 8;
4590 while (m3) {
4591 if (m3 & 0x8) {
4592 tcg_gen_shri_i64(tmp, o->in1, pos);
4593 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4594 tcg_gen_addi_i64(o->in2, o->in2, 1);
4596 m3 = (m3 << 1) & 0xf;
4597 pos -= 8;
4599 break;
4601 tcg_temp_free_i64(tmp);
4602 return DISAS_NEXT;
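/*
 * Worked example for the paths above (a sketch, with base = 0): a
 * contiguous mask m3 = 0x6 gives pos = ctz32(0x6) * 8 = 8, so register
 * bits 23..8 go out as a single 16-bit store; a sparse mask m3 = 0x5
 * takes the default loop, storing the byte at bits 23..16 and then the
 * byte at bits 7..0 to consecutive addresses.
 */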
4605 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4607 int r1 = get_field(s->fields, r1);
4608 int r3 = get_field(s->fields, r3);
4609 int size = s->insn->data;
4610 TCGv_i64 tsize = tcg_const_i64(size);
4612 while (1) {
4613 if (size == 8) {
4614 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4615 } else {
4616 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4618 if (r1 == r3) {
4619 break;
4621 tcg_gen_add_i64(o->in2, o->in2, tsize);
4622 r1 = (r1 + 1) & 15;
4625 tcg_temp_free_i64(tsize);
4626 return DISAS_NEXT;
4629 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4631 int r1 = get_field(s->fields, r1);
4632 int r3 = get_field(s->fields, r3);
4633 TCGv_i64 t = tcg_temp_new_i64();
4634 TCGv_i64 t4 = tcg_const_i64(4);
4635 TCGv_i64 t32 = tcg_const_i64(32);
4637 while (1) {
4638 tcg_gen_shl_i64(t, regs[r1], t32);
4639 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4640 if (r1 == r3) {
4641 break;
4643 tcg_gen_add_i64(o->in2, o->in2, t4);
4644 r1 = (r1 + 1) & 15;
4647 tcg_temp_free_i64(t);
4648 tcg_temp_free_i64(t4);
4649 tcg_temp_free_i64(t32);
4650 return DISAS_NEXT;
4653 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4655 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4656 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4657 } else if (HAVE_ATOMIC128) {
4658 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4659 } else {
4660 gen_helper_exit_atomic(cpu_env);
4661 return DISAS_NORETURN;
4663 return DISAS_NEXT;
4666 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4668 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4669 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4671 gen_helper_srst(cpu_env, r1, r2);
4673 tcg_temp_free_i32(r1);
4674 tcg_temp_free_i32(r2);
4675 set_cc_static(s);
4676 return DISAS_NEXT;
4679 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4681 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4682 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4684 gen_helper_srstu(cpu_env, r1, r2);
4686 tcg_temp_free_i32(r1);
4687 tcg_temp_free_i32(r2);
4688 set_cc_static(s);
4689 return DISAS_NEXT;
4692 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4694 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4695 return DISAS_NEXT;
4698 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4700 DisasCompare cmp;
4701 TCGv_i64 borrow;
4703 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4705 /* The !borrow flag is the msb of CC. Since we want the inverse of
4706 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4707 disas_jcc(s, &cmp, 8 | 4);
4708 borrow = tcg_temp_new_i64();
4709 if (cmp.is_64) {
4710 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4711 } else {
4712 TCGv_i32 t = tcg_temp_new_i32();
4713 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4714 tcg_gen_extu_i32_i64(borrow, t);
4715 tcg_temp_free_i32(t);
4717 free_compare(&cmp);
4719 tcg_gen_sub_i64(o->out, o->out, borrow);
4720 tcg_temp_free_i64(borrow);
4721 return DISAS_NEXT;
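/*
 * Concretely: after a logical subtraction the CC is 0 or 1 exactly when
 * a borrow occurred, so the mask 8 | 4 above makes disas_jcc produce
 * borrow = (cc < 2) as a 0/1 value, which is then subtracted from the
 * result.
 */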
4724 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4726 TCGv_i32 t;
4728 update_psw_addr(s);
4729 update_cc_op(s);
4731 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4732 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4733 tcg_temp_free_i32(t);
4735 t = tcg_const_i32(s->ilen);
4736 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4737 tcg_temp_free_i32(t);
4739 gen_exception(EXCP_SVC);
4740 return DISAS_NORETURN;
4743 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4745 int cc = 0;
4747 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4748 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4749 gen_op_movi_cc(s, cc);
4750 return DISAS_NEXT;
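/*
 * Resulting TEST ADDRESSING MODE condition codes: CC 0 in 24-bit mode,
 * CC 1 in 31-bit mode (FLAG_MASK_32 only), and CC 3 in 64-bit mode,
 * where both flag bits are set.
 */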
4753 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4755 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4756 set_cc_static(s);
4757 return DISAS_NEXT;
4760 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4762 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4763 set_cc_static(s);
4764 return DISAS_NEXT;
4767 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4769 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4770 set_cc_static(s);
4771 return DISAS_NEXT;
4774 #ifndef CONFIG_USER_ONLY
4776 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4778 gen_helper_testblock(cc_op, cpu_env, o->in2);
4779 set_cc_static(s);
4780 return DISAS_NEXT;
4783 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4785 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4786 set_cc_static(s);
4787 return DISAS_NEXT;
4790 #endif
4792 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4794 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4795 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4796 tcg_temp_free_i32(l1);
4797 set_cc_static(s);
4798 return DISAS_NEXT;
4801 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4803 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4804 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4805 tcg_temp_free_i32(l);
4806 set_cc_static(s);
4807 return DISAS_NEXT;
4810 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4812 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4813 return_low128(o->out2);
4814 set_cc_static(s);
4815 return DISAS_NEXT;
4818 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4820 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4821 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4822 tcg_temp_free_i32(l);
4823 set_cc_static(s);
4824 return DISAS_NEXT;
4827 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4829 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4830 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4831 tcg_temp_free_i32(l);
4832 set_cc_static(s);
4833 return DISAS_NEXT;
4836 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4838 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4839 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4840 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4841 TCGv_i32 tst = tcg_temp_new_i32();
4842 int m3 = get_field(s->fields, m3);
4844 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4845 m3 = 0;
4847 if (m3 & 1) {
4848 tcg_gen_movi_i32(tst, -1);
4849 } else {
4850 tcg_gen_extrl_i64_i32(tst, regs[0]);
4851 if (s->insn->opc & 3) {
4852 tcg_gen_ext8u_i32(tst, tst);
4853 } else {
4854 tcg_gen_ext16u_i32(tst, tst);
4857 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4859 tcg_temp_free_i32(r1);
4860 tcg_temp_free_i32(r2);
4861 tcg_temp_free_i32(sizes);
4862 tcg_temp_free_i32(tst);
4863 set_cc_static(s);
4864 return DISAS_NEXT;
4867 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4869 TCGv_i32 t1 = tcg_const_i32(0xff);
4870 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4871 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4872 tcg_temp_free_i32(t1);
4873 set_cc_static(s);
4874 return DISAS_NEXT;
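/*
 * TEST AND SET in a nutshell: the byte is atomically exchanged with
 * 0xff and the CC comes from the leftmost bit of the old value, so an
 * old byte of 0x00 yields CC 0 and 0x80 (already set) yields CC 1.
 */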
4877 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4879 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4880 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4881 tcg_temp_free_i32(l);
4882 return DISAS_NEXT;
4885 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4887 int l1 = get_field(s->fields, l1) + 1;
4888 TCGv_i32 l;
4890 /* The length must not exceed 32 bytes. */
4891 if (l1 > 32) {
4892 gen_program_exception(s, PGM_SPECIFICATION);
4893 return DISAS_NORETURN;
4895 l = tcg_const_i32(l1);
4896 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4897 tcg_temp_free_i32(l);
4898 set_cc_static(s);
4899 return DISAS_NEXT;
4902 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4904 int l1 = get_field(s->fields, l1) + 1;
4905 TCGv_i32 l;
4907 /* The length must be even and must not exceed 64 bytes. */
4908 if ((l1 & 1) || (l1 > 64)) {
4909 gen_program_exception(s, PGM_SPECIFICATION);
4910 return DISAS_NORETURN;
4912 l = tcg_const_i32(l1);
4913 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4914 tcg_temp_free_i32(l);
4915 set_cc_static(s);
4916 return DISAS_NEXT;
4920 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4922 int d1 = get_field(s->fields, d1);
4923 int d2 = get_field(s->fields, d2);
4924 int b1 = get_field(s->fields, b1);
4925 int b2 = get_field(s->fields, b2);
4926 int l = get_field(s->fields, l1);
4927 TCGv_i32 t32;
4929 o->addr1 = get_address(s, 0, b1, d1);
4931 /* If the addresses are identical, this is a store/memset of zero. */
4932 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4933 o->in2 = tcg_const_i64(0);
4935 l++;
4936 while (l >= 8) {
4937 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4938 l -= 8;
4939 if (l > 0) {
4940 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4943 if (l >= 4) {
4944 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4945 l -= 4;
4946 if (l > 0) {
4947 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4950 if (l >= 2) {
4951 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4952 l -= 2;
4953 if (l > 0) {
4954 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4957 if (l) {
4958 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4960 gen_op_movi_cc(s, 0);
4961 return DISAS_NEXT;
4964 /* But in general we'll defer to a helper. */
4965 o->in2 = get_address(s, 0, b2, d2);
4966 t32 = tcg_const_i32(l);
4967 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4968 tcg_temp_free_i32(t32);
4969 set_cc_static(s);
4970 return DISAS_NEXT;
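/*
 * Worked example for the memset special case (a sketch): an XC with
 * identical operands and l1 field 18 zeroes 19 bytes, emitted inline as
 * two 8-byte stores, one 2-byte store and one 1-byte store; lengths
 * above 32 bytes, or distinct operands, go through gen_helper_xc.
 */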
4973 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4975 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4976 return DISAS_NEXT;
4979 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4981 int shift = s->insn->data & 0xff;
4982 int size = s->insn->data >> 8;
4983 uint64_t mask = ((1ull << size) - 1) << shift;
4985 assert(!o->g_in2);
4986 tcg_gen_shli_i64(o->in2, o->in2, shift);
4987 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4989 /* Produce the CC from only the bits manipulated. */
4990 tcg_gen_andi_i64(cc_dst, o->out, mask);
4991 set_cc_nz_u64(s, cc_dst);
4992 return DISAS_NEXT;
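/*
 * Example decoding of insn->data (a sketch, assuming a variant that
 * xors the high word of a register): size 32 and shift 32 give
 * mask = 0xffffffff00000000, so the immediate is shifted into place
 * and the CC is derived from just those 32 bits.
 */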
4995 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4997 o->in1 = tcg_temp_new_i64();
4999 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5000 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5001 } else {
5002 /* Perform the atomic operation in memory. */
5003 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5004 s->insn->data);
5007 /* Compute the result also for the atomic case: it is needed to set the CC. */
5008 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5010 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5011 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5013 return DISAS_NEXT;
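/*
 * Note on the atomic path: tcg_gen_atomic_fetch_xor_i64 leaves the old
 * memory value in o->in1 while xor-ing in memory, so the recomputed
 * o->out equals the value that was actually stored, which is what the
 * CC must reflect.
 */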
5016 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5018 o->out = tcg_const_i64(0);
5019 return DISAS_NEXT;
5022 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5024 o->out = tcg_const_i64(0);
5025 o->out2 = o->out;
5026 o->g_out2 = true;
5027 return DISAS_NEXT;
5030 #ifndef CONFIG_USER_ONLY
5031 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5033 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5035 gen_helper_clp(cpu_env, r2);
5036 tcg_temp_free_i32(r2);
5037 set_cc_static(s);
5038 return DISAS_NEXT;
5041 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5043 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5044 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5046 gen_helper_pcilg(cpu_env, r1, r2);
5047 tcg_temp_free_i32(r1);
5048 tcg_temp_free_i32(r2);
5049 set_cc_static(s);
5050 return DISAS_NEXT;
5053 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5055 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5056 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5058 gen_helper_pcistg(cpu_env, r1, r2);
5059 tcg_temp_free_i32(r1);
5060 tcg_temp_free_i32(r2);
5061 set_cc_static(s);
5062 return DISAS_NEXT;
5065 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5067 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5068 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5070 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5071 tcg_temp_free_i32(ar);
5072 tcg_temp_free_i32(r1);
5073 set_cc_static(s);
5074 return DISAS_NEXT;
5077 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5079 gen_helper_sic(cpu_env, o->in1, o->in2);
5080 return DISAS_NEXT;
5083 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5085 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5086 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5088 gen_helper_rpcit(cpu_env, r1, r2);
5089 tcg_temp_free_i32(r1);
5090 tcg_temp_free_i32(r2);
5091 set_cc_static(s);
5092 return DISAS_NEXT;
5095 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5097 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5098 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
5099 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5101 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5102 tcg_temp_free_i32(ar);
5103 tcg_temp_free_i32(r1);
5104 tcg_temp_free_i32(r3);
5105 set_cc_static(s);
5106 return DISAS_NEXT;
5109 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5111 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5112 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5114 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5115 tcg_temp_free_i32(ar);
5116 tcg_temp_free_i32(r1);
5117 set_cc_static(s);
5118 return DISAS_NEXT;
5120 #endif
5122 #include "translate_vx.inc.c"
5124 /* ====================================================================== */
5125 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5126 the original inputs), update the various cc data structures in order to
5127 be able to compute the new condition code. */
5129 static void cout_abs32(DisasContext *s, DisasOps *o)
5131 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5134 static void cout_abs64(DisasContext *s, DisasOps *o)
5136 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5139 static void cout_adds32(DisasContext *s, DisasOps *o)
5141 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5144 static void cout_adds64(DisasContext *s, DisasOps *o)
5146 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5149 static void cout_addu32(DisasContext *s, DisasOps *o)
5151 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
5154 static void cout_addu64(DisasContext *s, DisasOps *o)
5156 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
5159 static void cout_addc32(DisasContext *s, DisasOps *o)
5161 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
5164 static void cout_addc64(DisasContext *s, DisasOps *o)
5166 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5169 static void cout_cmps32(DisasContext *s, DisasOps *o)
5171 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5174 static void cout_cmps64(DisasContext *s, DisasOps *o)
5176 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5179 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5181 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5184 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5186 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5189 static void cout_f32(DisasContext *s, DisasOps *o)
5191 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5194 static void cout_f64(DisasContext *s, DisasOps *o)
5196 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5199 static void cout_f128(DisasContext *s, DisasOps *o)
5201 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5204 static void cout_nabs32(DisasContext *s, DisasOps *o)
5206 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5209 static void cout_nabs64(DisasContext *s, DisasOps *o)
5211 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5214 static void cout_neg32(DisasContext *s, DisasOps *o)
5216 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5219 static void cout_neg64(DisasContext *s, DisasOps *o)
5221 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5224 static void cout_nz32(DisasContext *s, DisasOps *o)
5226 tcg_gen_ext32u_i64(cc_dst, o->out);
5227 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5230 static void cout_nz64(DisasContext *s, DisasOps *o)
5232 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5235 static void cout_s32(DisasContext *s, DisasOps *o)
5237 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5240 static void cout_s64(DisasContext *s, DisasOps *o)
5242 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5245 static void cout_subs32(DisasContext *s, DisasOps *o)
5247 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5250 static void cout_subs64(DisasContext *s, DisasOps *o)
5252 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5255 static void cout_subu32(DisasContext *s, DisasOps *o)
5257 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5260 static void cout_subu64(DisasContext *s, DisasOps *o)
5262 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5265 static void cout_subb32(DisasContext *s, DisasOps *o)
5267 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5270 static void cout_subb64(DisasContext *s, DisasOps *o)
5272 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5275 static void cout_tm32(DisasContext *s, DisasOps *o)
5277 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5280 static void cout_tm64(DisasContext *s, DisasOps *o)
5282 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5285 /* ====================================================================== */
5286 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5287 with the TCG register to which we will write. Used in combination with
5288 the "wout" generators, in some cases we need a new temporary, and in
5289 some cases we can write to a TCG global. */
5291 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5293 o->out = tcg_temp_new_i64();
5295 #define SPEC_prep_new 0
5297 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5299 o->out = tcg_temp_new_i64();
5300 o->out2 = tcg_temp_new_i64();
5302 #define SPEC_prep_new_P 0
5304 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5306 o->out = regs[get_field(f, r1)];
5307 o->g_out = true;
5309 #define SPEC_prep_r1 0
5311 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5313 int r1 = get_field(f, r1);
5314 o->out = regs[r1];
5315 o->out2 = regs[r1 + 1];
5316 o->g_out = o->g_out2 = true;
5318 #define SPEC_prep_r1_P SPEC_r1_even
5320 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5321 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5323 o->out = load_freg(get_field(f, r1));
5324 o->out2 = load_freg(get_field(f, r1) + 2);
5326 #define SPEC_prep_x1 SPEC_r1_f128
5328 /* ====================================================================== */
5329 /* The "Write OUTput" generators. These generally perform some non-trivial
5330 copy of data to TCG globals, or to main memory. The trivial cases are
5331 generally handled by having a "prep" generator install the TCG global
5332 as the destination of the operation. */
5334 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5336 store_reg(get_field(f, r1), o->out);
5338 #define SPEC_wout_r1 0
5340 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5342 int r1 = get_field(f, r1);
5343 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5345 #define SPEC_wout_r1_8 0
5347 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5349 int r1 = get_field(f, r1);
5350 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5352 #define SPEC_wout_r1_16 0
5354 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5356 store_reg32_i64(get_field(f, r1), o->out);
5358 #define SPEC_wout_r1_32 0
5360 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5362 store_reg32h_i64(get_field(f, r1), o->out);
5364 #define SPEC_wout_r1_32h 0
5366 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5368 int r1 = get_field(f, r1);
5369 store_reg32_i64(r1, o->out);
5370 store_reg32_i64(r1 + 1, o->out2);
5372 #define SPEC_wout_r1_P32 SPEC_r1_even
5374 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5376 int r1 = get_field(f, r1);
5377 store_reg32_i64(r1 + 1, o->out);
5378 tcg_gen_shri_i64(o->out, o->out, 32);
5379 store_reg32_i64(r1, o->out);
5381 #define SPEC_wout_r1_D32 SPEC_r1_even
5383 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5385 int r3 = get_field(f, r3);
5386 store_reg32_i64(r3, o->out);
5387 store_reg32_i64(r3 + 1, o->out2);
5389 #define SPEC_wout_r3_P32 SPEC_r3_even
5391 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5393 int r3 = get_field(f, r3);
5394 store_reg(r3, o->out);
5395 store_reg(r3 + 1, o->out2);
5397 #define SPEC_wout_r3_P64 SPEC_r3_even
5399 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5401 store_freg32_i64(get_field(f, r1), o->out);
5403 #define SPEC_wout_e1 0
5405 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5407 store_freg(get_field(f, r1), o->out);
5409 #define SPEC_wout_f1 0
5411 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5413 int f1 = get_field(s->fields, r1);
5414 store_freg(f1, o->out);
5415 store_freg(f1 + 2, o->out2);
5417 #define SPEC_wout_x1 SPEC_r1_f128
5419 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5421 if (get_field(f, r1) != get_field(f, r2)) {
5422 store_reg32_i64(get_field(f, r1), o->out);
5425 #define SPEC_wout_cond_r1r2_32 0
5427 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5429 if (get_field(f, r1) != get_field(f, r2)) {
5430 store_freg32_i64(get_field(f, r1), o->out);
5433 #define SPEC_wout_cond_e1e2 0
5435 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5437 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5439 #define SPEC_wout_m1_8 0
5441 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5443 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5445 #define SPEC_wout_m1_16 0
5447 #ifndef CONFIG_USER_ONLY
5448 static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
5450 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5452 #define SPEC_wout_m1_16a 0
5453 #endif
5455 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5457 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5459 #define SPEC_wout_m1_32 0
5461 #ifndef CONFIG_USER_ONLY
5462 static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
5464 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5466 #define SPEC_wout_m1_32a 0
5467 #endif
5469 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5471 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5473 #define SPEC_wout_m1_64 0
5475 #ifndef CONFIG_USER_ONLY
5476 static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5478 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5480 #define SPEC_wout_m1_64a 0
5481 #endif
5483 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5485 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5487 #define SPEC_wout_m2_32 0
5489 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5491 store_reg(get_field(f, r1), o->in2);
5493 #define SPEC_wout_in2_r1 0
5495 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5497 store_reg32_i64(get_field(f, r1), o->in2);
5499 #define SPEC_wout_in2_r1_32 0
5501 /* ====================================================================== */
5502 /* The "INput 1" generators. These load the first operand to an insn. */
5504 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5506 o->in1 = load_reg(get_field(f, r1));
5508 #define SPEC_in1_r1 0
5510 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5512 o->in1 = regs[get_field(f, r1)];
5513 o->g_in1 = true;
5515 #define SPEC_in1_r1_o 0
5517 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5519 o->in1 = tcg_temp_new_i64();
5520 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5522 #define SPEC_in1_r1_32s 0
5524 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5526 o->in1 = tcg_temp_new_i64();
5527 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5529 #define SPEC_in1_r1_32u 0
5531 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5533 o->in1 = tcg_temp_new_i64();
5534 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5536 #define SPEC_in1_r1_sr32 0
5538 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5540 o->in1 = load_reg(get_field(f, r1) + 1);
5542 #define SPEC_in1_r1p1 SPEC_r1_even
5544 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5546 o->in1 = tcg_temp_new_i64();
5547 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5549 #define SPEC_in1_r1p1_32s SPEC_r1_even
5551 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5553 o->in1 = tcg_temp_new_i64();
5554 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5556 #define SPEC_in1_r1p1_32u SPEC_r1_even
5558 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5560 int r1 = get_field(f, r1);
5561 o->in1 = tcg_temp_new_i64();
5562 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5564 #define SPEC_in1_r1_D32 SPEC_r1_even
5566 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5568 o->in1 = load_reg(get_field(f, r2));
5570 #define SPEC_in1_r2 0
5572 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5574 o->in1 = tcg_temp_new_i64();
5575 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5577 #define SPEC_in1_r2_sr32 0
5579 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5581 o->in1 = load_reg(get_field(f, r3));
5583 #define SPEC_in1_r3 0
5585 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5587 o->in1 = regs[get_field(f, r3)];
5588 o->g_in1 = true;
5590 #define SPEC_in1_r3_o 0
5592 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5594 o->in1 = tcg_temp_new_i64();
5595 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5597 #define SPEC_in1_r3_32s 0
5599 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5601 o->in1 = tcg_temp_new_i64();
5602 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5604 #define SPEC_in1_r3_32u 0
5606 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5608 int r3 = get_field(f, r3);
5609 o->in1 = tcg_temp_new_i64();
5610 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5612 #define SPEC_in1_r3_D32 SPEC_r3_even
5614 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5616 o->in1 = load_freg32_i64(get_field(f, r1));
5618 #define SPEC_in1_e1 0
5620 static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5622 o->in1 = load_freg(get_field(f, r1));
5624 #define SPEC_in1_f1 0
5626 /* Load the high double word of an extended (128-bit) format FP number */
5627 static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
5629 o->in1 = load_freg(get_field(f, r2));
5631 #define SPEC_in1_x2h SPEC_r2_f128
5633 static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
5635 o->in1 = load_freg(get_field(f, r3));
5637 #define SPEC_in1_f3 0
5639 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5641 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5643 #define SPEC_in1_la1 0
5645 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5647 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5648 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5650 #define SPEC_in1_la2 0
5652 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5654 in1_la1(s, f, o);
5655 o->in1 = tcg_temp_new_i64();
5656 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5658 #define SPEC_in1_m1_8u 0
5660 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5662 in1_la1(s, f, o);
5663 o->in1 = tcg_temp_new_i64();
5664 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5666 #define SPEC_in1_m1_16s 0
5668 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5670 in1_la1(s, f, o);
5671 o->in1 = tcg_temp_new_i64();
5672 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5674 #define SPEC_in1_m1_16u 0
5676 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5678 in1_la1(s, f, o);
5679 o->in1 = tcg_temp_new_i64();
5680 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5682 #define SPEC_in1_m1_32s 0
5684 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5686 in1_la1(s, f, o);
5687 o->in1 = tcg_temp_new_i64();
5688 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5690 #define SPEC_in1_m1_32u 0
5692 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5694 in1_la1(s, f, o);
5695 o->in1 = tcg_temp_new_i64();
5696 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5698 #define SPEC_in1_m1_64 0
5700 /* ====================================================================== */
5701 /* The "INput 2" generators. These load the second operand to an insn. */
5703 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5705 o->in2 = regs[get_field(f, r1)];
5706 o->g_in2 = true;
5708 #define SPEC_in2_r1_o 0
5710 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5712 o->in2 = tcg_temp_new_i64();
5713 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5715 #define SPEC_in2_r1_16u 0
5717 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5719 o->in2 = tcg_temp_new_i64();
5720 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5722 #define SPEC_in2_r1_32u 0
5724 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5726 int r1 = get_field(f, r1);
5727 o->in2 = tcg_temp_new_i64();
5728 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5730 #define SPEC_in2_r1_D32 SPEC_r1_even
5732 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5734 o->in2 = load_reg(get_field(f, r2));
5736 #define SPEC_in2_r2 0
5738 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5740 o->in2 = regs[get_field(f, r2)];
5741 o->g_in2 = true;
5743 #define SPEC_in2_r2_o 0
5745 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5747 int r2 = get_field(f, r2);
5748 if (r2 != 0) {
5749 o->in2 = load_reg(r2);
5752 #define SPEC_in2_r2_nz 0
5754 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5756 o->in2 = tcg_temp_new_i64();
5757 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5759 #define SPEC_in2_r2_8s 0
5761 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5763 o->in2 = tcg_temp_new_i64();
5764 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5766 #define SPEC_in2_r2_8u 0
5768 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5770 o->in2 = tcg_temp_new_i64();
5771 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5773 #define SPEC_in2_r2_16s 0
5775 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5777 o->in2 = tcg_temp_new_i64();
5778 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5780 #define SPEC_in2_r2_16u 0
5782 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5784 o->in2 = load_reg(get_field(f, r3));
5786 #define SPEC_in2_r3 0
5788 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5790 o->in2 = tcg_temp_new_i64();
5791 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5793 #define SPEC_in2_r3_sr32 0
5795 static void in2_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5797 o->in2 = tcg_temp_new_i64();
5798 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r3)]);
5800 #define SPEC_in2_r3_32u 0
5802 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5804 o->in2 = tcg_temp_new_i64();
5805 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5807 #define SPEC_in2_r2_32s 0
5809 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5811 o->in2 = tcg_temp_new_i64();
5812 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5814 #define SPEC_in2_r2_32u 0
5816 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5818 o->in2 = tcg_temp_new_i64();
5819 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5821 #define SPEC_in2_r2_sr32 0
5823 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5825 o->in2 = load_freg32_i64(get_field(f, r2));
5827 #define SPEC_in2_e2 0
5829 static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
5831 o->in2 = load_freg(get_field(f, r2));
5833 #define SPEC_in2_f2 0
5835 /* Load the low double word of an extended (128-bit) format FP number */
5836 static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
5838 o->in2 = load_freg(get_field(f, r2) + 2);
5840 #define SPEC_in2_x2l SPEC_r2_f128
5842 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5844 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5846 #define SPEC_in2_ra2 0
5848 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5850 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5851 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5853 #define SPEC_in2_a2 0
5855 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5857 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
5859 #define SPEC_in2_ri2 0
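/*
 * Relative-immediate operands count halfwords, hence the * 2 above:
 * e.g. i2 = -3 yields the address s->base.pc_next - 6.
 */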
5861 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5863 help_l2_shift(s, f, o, 31);
5865 #define SPEC_in2_sh32 0
5867 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5869 help_l2_shift(s, f, o, 63);
5871 #define SPEC_in2_sh64 0
5873 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5875 in2_a2(s, f, o);
5876 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5878 #define SPEC_in2_m2_8u 0
5880 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5882 in2_a2(s, f, o);
5883 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5885 #define SPEC_in2_m2_16s 0
5887 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5889 in2_a2(s, f, o);
5890 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5892 #define SPEC_in2_m2_16u 0
5894 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5896 in2_a2(s, f, o);
5897 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5899 #define SPEC_in2_m2_32s 0
5901 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5903 in2_a2(s, f, o);
5904 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5906 #define SPEC_in2_m2_32u 0
5908 #ifndef CONFIG_USER_ONLY
5909 static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
5911 in2_a2(s, f, o);
5912 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5914 #define SPEC_in2_m2_32ua 0
5915 #endif
5917 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5919 in2_a2(s, f, o);
5920 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5922 #define SPEC_in2_m2_64 0
5924 #ifndef CONFIG_USER_ONLY
5925 static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5927 in2_a2(s, f, o);
5928 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5930 #define SPEC_in2_m2_64a 0
5931 #endif
5933 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5935 in2_ri2(s, f, o);
5936 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5938 #define SPEC_in2_mri2_16u 0
5940 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5942 in2_ri2(s, f, o);
5943 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5945 #define SPEC_in2_mri2_32s 0
5947 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5949 in2_ri2(s, f, o);
5950 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5952 #define SPEC_in2_mri2_32u 0
5954 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5956 in2_ri2(s, f, o);
5957 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5959 #define SPEC_in2_mri2_64 0
5961 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5963 o->in2 = tcg_const_i64(get_field(f, i2));
5965 #define SPEC_in2_i2 0
5967 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5969 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5971 #define SPEC_in2_i2_8u 0
5973 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5975 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5977 #define SPEC_in2_i2_16u 0
5979 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5981 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5983 #define SPEC_in2_i2_32u 0
5985 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5987 uint64_t i2 = (uint16_t)get_field(f, i2);
5988 o->in2 = tcg_const_i64(i2 << s->insn->data);
5990 #define SPEC_in2_i2_16u_shl 0
5992 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5994 uint64_t i2 = (uint32_t)get_field(f, i2);
5995 o->in2 = tcg_const_i64(i2 << s->insn->data);
5997 #define SPEC_in2_i2_32u_shl 0
5999 #ifndef CONFIG_USER_ONLY
6000 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
6002 o->in2 = tcg_const_i64(s->fields->raw_insn);
6004 #define SPEC_in2_insn 0
6005 #endif
6007 /* ====================================================================== */
6009 /* Find opc within the table of insns. This is formulated as a switch
6010 statement so that (1) we get compile-time notice of cut-paste errors
6011 for duplicated opcodes, and (2) the compiler generates the binary
6012 search tree, rather than us having to post-process the table. */
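/*
 * This is the classic X-macro pattern: insn-data.def is included three
 * times with different definitions of E -- once to build the
 * DisasInsnEnum constants, once to build insn_info[], and once to
 * expand into "case OPC: return &insn_info[insn_NM];" lines inside
 * lookup_opc's switch below.
 */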
6014 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6015 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6017 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6018 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6020 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6021 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6023 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6025 enum DisasInsnEnum {
6026 #include "insn-data.def"
6029 #undef E
6030 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6031 .opc = OPC, \
6032 .flags = FL, \
6033 .fmt = FMT_##FT, \
6034 .fac = FAC_##FC, \
6035 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6036 .name = #NM, \
6037 .help_in1 = in1_##I1, \
6038 .help_in2 = in2_##I2, \
6039 .help_prep = prep_##P, \
6040 .help_wout = wout_##W, \
6041 .help_cout = cout_##CC, \
6042 .help_op = op_##OP, \
6043 .data = D \
6046 /* Allow 0 to be used for NULL in the table below. */
6047 #define in1_0 NULL
6048 #define in2_0 NULL
6049 #define prep_0 NULL
6050 #define wout_0 NULL
6051 #define cout_0 NULL
6052 #define op_0 NULL
6054 #define SPEC_in1_0 0
6055 #define SPEC_in2_0 0
6056 #define SPEC_prep_0 0
6057 #define SPEC_wout_0 0
6059 /* Give smaller names to the various facilities. */
6060 #define FAC_Z S390_FEAT_ZARCH
6061 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6062 #define FAC_DFP S390_FEAT_DFP
6063 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6064 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6065 #define FAC_EE S390_FEAT_EXECUTE_EXT
6066 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6067 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6068 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6069 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6070 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6071 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6072 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6073 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6074 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6075 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6076 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6077 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6078 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6079 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6080 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6081 #define FAC_SFLE S390_FEAT_STFLE
6082 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6083 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6084 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6085 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6086 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6087 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6088 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6089 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6090 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6091 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6092 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6093 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6094 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6095 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6096 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6097 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6098 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6099 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6101 static const DisasInsn insn_info[] = {
6102 #include "insn-data.def"
6105 #undef E
6106 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6107 case OPC: return &insn_info[insn_ ## NM];
6109 static const DisasInsn *lookup_opc(uint16_t opc)
6111 switch (opc) {
6112 #include "insn-data.def"
6113 default:
6114 return NULL;
6118 #undef F
6119 #undef E
6120 #undef D
6121 #undef C
6123 /* Extract a field from the insn. The INSN should be left-aligned in
6124 the uint64_t so that we can more easily utilize the big-bit-endian
6125 definitions we extract from the Principles of Operation. */
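/*
 * E.g. with the insn left-aligned, a 4-bit field starting at bit 8 (in
 * big-bit-endian numbering, bit 0 being the msb) comes out below as
 * r = (insn << 8) >> (64 - 4), i.e. shift the field flush left, then
 * flush right.
 */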
6127 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6129 uint32_t r, m;
6131 if (f->size == 0) {
6132 return;
6135 /* Zero extract the field from the insn. */
6136 r = (insn << f->beg) >> (64 - f->size);
6138 /* Sign-extend, or un-swap the field as necessary. */
6139 switch (f->type) {
6140 case 0: /* unsigned */
6141 break;
6142 case 1: /* signed */
6143 assert(f->size <= 32);
6144 m = 1u << (f->size - 1);
6145 r = (r ^ m) - m;
6146 break;
6147 case 2: /* dl+dh split, signed 20 bit. */
6148 r = ((int8_t)r << 12) | (r >> 8);
6149 break;
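/*
 * The raw extraction leaves DL in bits 19..8 and DH in bits 7..0
 * of r; the reassembly above gives e.g. for DH = 0xff, DL = 0x800
 * the value r = 0xfffff800, the sign-extended 20-bit
 * displacement -2048.
 */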
6150 case 3: /* MSB stored in RXB */
6151 g_assert(f->size == 4);
6152 switch (f->beg) {
6153 case 8:
6154 r |= extract64(insn, 63 - 36, 1) << 4;
6155 break;
6156 case 12:
6157 r |= extract64(insn, 63 - 37, 1) << 4;
6158 break;
6159 case 16:
6160 r |= extract64(insn, 63 - 38, 1) << 4;
6161 break;
6162 case 32:
6163 r |= extract64(insn, 63 - 39, 1) << 4;
6164 break;
6165 default:
6166 g_assert_not_reached();
6168 break;
6169 default:
6170 abort();
6173 /* Validate that the "compressed" encoding we selected above is valid.
6174 I.e. we haven't made two different original fields overlap. */
6175 assert(((o->presentC >> f->indexC) & 1) == 0);
6176 o->presentC |= 1 << f->indexC;
6177 o->presentO |= 1 << f->indexO;
6179 o->c[f->indexC] = r;
6182 /* Lookup the insn at the current PC, extracting the operands into O and
6183 returning the info struct for the insn. Returns NULL for invalid insn. */
6185 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
6186 DisasFields *f)
6188 uint64_t insn, pc = s->base.pc_next;
6189 int op, op2, ilen;
6190 const DisasInsn *info;
6192 if (unlikely(s->ex_value)) {
6193 /* Drop the EX data now, so that it's clear on exception paths. */
6194 TCGv_i64 zero = tcg_const_i64(0);
6195 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6196 tcg_temp_free_i64(zero);
6198 /* Extract the values saved by EXECUTE. */
6199 insn = s->ex_value & 0xffffffffffff0000ull;
6200 ilen = s->ex_value & 0xf;
6201 op = insn >> 56;
6202 } else {
6203 insn = ld_code2(env, pc);
6204 op = (insn >> 8) & 0xff;
6205 ilen = get_ilen(op);
6206 switch (ilen) {
6207 case 2:
6208 insn = insn << 48;
6209 break;
6210 case 4:
6211 insn = ld_code4(env, pc) << 32;
6212 break;
6213 case 6:
6214 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6215 break;
6216 default:
6217 g_assert_not_reached();
6220 s->pc_tmp = s->base.pc_next + ilen;
6221 s->ilen = ilen;
6223 /* We can't actually determine the insn format until we've looked up
6224 the full insn opcode, which we can't do without locating the
6225 secondary opcode. Assume by default that OP2 is at bit 40; for
6226 those smaller insns that don't actually have a secondary opcode
6227 this will correctly result in OP2 = 0. */
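/*
 * For instance, a 0xb2xx instruction takes its second byte as OP2 via
 * (insn << 8) >> 56, while the RI formats keep only a 4-bit secondary
 * opcode via (insn << 12) >> 60.
 */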
6228 switch (op) {
6229 case 0x01: /* E */
6230 case 0x80: /* S */
6231 case 0x82: /* S */
6232 case 0x93: /* S */
6233 case 0xb2: /* S, RRF, RRE, IE */
6234 case 0xb3: /* RRE, RRD, RRF */
6235 case 0xb9: /* RRE, RRF */
6236 case 0xe5: /* SSE, SIL */
6237 op2 = (insn << 8) >> 56;
6238 break;
6239 case 0xa5: /* RI */
6240 case 0xa7: /* RI */
6241 case 0xc0: /* RIL */
6242 case 0xc2: /* RIL */
6243 case 0xc4: /* RIL */
6244 case 0xc6: /* RIL */
6245 case 0xc8: /* SSF */
6246 case 0xcc: /* RIL */
6247 op2 = (insn << 12) >> 60;
6248 break;
6249 case 0xc5: /* MII */
6250 case 0xc7: /* SMI */
6251 case 0xd0 ... 0xdf: /* SS */
6252 case 0xe1: /* SS */
6253 case 0xe2: /* SS */
6254 case 0xe8: /* SS */
6255 case 0xe9: /* SS */
6256 case 0xea: /* SS */
6257 case 0xee ... 0xf3: /* SS */
6258 case 0xf8 ... 0xfd: /* SS */
6259 op2 = 0;
6260 break;
6261 default:
6262 op2 = (insn << 40) >> 56;
6263 break;
6266 memset(f, 0, sizeof(*f));
6267 f->raw_insn = insn;
6268 f->op = op;
6269 f->op2 = op2;
6271 /* Lookup the instruction. */
6272 info = lookup_opc(op << 8 | op2);
6274 /* If we found it, extract the operands. */
6275 if (info != NULL) {
6276 DisasFormat fmt = info->fmt;
6277 int i;
6279 for (i = 0; i < NUM_C_FIELD; ++i) {
6280 extract_field(f, &format_info[fmt].op[i], insn);
6283 return info;
6286 static bool is_afp_reg(int reg)
6288 return reg % 2 || reg > 6;
6291 static bool is_fp_pair(int reg)
6293 /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
6294 return !(reg & 0x2);
6297 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6299 const DisasInsn *insn;
6300 DisasJumpType ret = DISAS_NEXT;
6301 DisasFields f;
6302 DisasOps o = {};
6304 /* Search for the insn in the table. */
6305 insn = extract_insn(env, s, &f);
6307 /* Emit insn_start now that we know the ILEN. */
6308 tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);
6310 /* Not found means unimplemented/illegal opcode. */
6311 if (insn == NULL) {
6312 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6313 f.op, f.op2);
6314 gen_illegal_opcode(s);
6315 return DISAS_NORETURN;
6318 #ifndef CONFIG_USER_ONLY
6319 if (s->base.tb->flags & FLAG_MASK_PER) {
6320 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6321 gen_helper_per_ifetch(cpu_env, addr);
6322 tcg_temp_free_i64(addr);
6324 #endif
6326 /* process flags */
6327 if (insn->flags) {
6328 /* privileged instruction */
6329 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6330 gen_program_exception(s, PGM_PRIVILEGED);
6331 return DISAS_NORETURN;
6334 /* if AFP is not enabled, instructions and registers are forbidden */
6335 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6336 uint8_t dxc = 0;
6338 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
6339 dxc = 1;
6341 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
6342 dxc = 1;
6344 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
6345 dxc = 1;
6347 if (insn->flags & IF_BFP) {
6348 dxc = 2;
6350 if (insn->flags & IF_DFP) {
6351 dxc = 3;
6353 if (insn->flags & IF_VEC) {
6354 dxc = 0xfe;
6356 if (dxc) {
6357 gen_data_exception(dxc);
6358 return DISAS_NORETURN;
6362 /* if vector instructions are not enabled, executing them is forbidden */
6363 if (insn->flags & IF_VEC) {
6364 if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6365 gen_data_exception(0xfe);
6366 return DISAS_NORETURN;
6371 /* Check for insn specification exceptions. */
6372 if (insn->spec) {
6373 if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
6374 (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
6375 (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
6376 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
6377 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
6378 gen_program_exception(s, PGM_SPECIFICATION);
6379 return DISAS_NORETURN;
6383 /* Set up the structures we use to communicate with the helpers. */
6384 s->insn = insn;
6385 s->fields = &f;
6387 /* Implement the instruction. */
6388 if (insn->help_in1) {
6389 insn->help_in1(s, &f, &o);
6391 if (insn->help_in2) {
6392 insn->help_in2(s, &f, &o);
6394 if (insn->help_prep) {
6395 insn->help_prep(s, &f, &o);
6397 if (insn->help_op) {
6398 ret = insn->help_op(s, &o);
6400 if (ret != DISAS_NORETURN) {
6401 if (insn->help_wout) {
6402 insn->help_wout(s, &f, &o);
6404 if (insn->help_cout) {
6405 insn->help_cout(s, &o);
    /* Free any temporaries created by the helpers. */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}

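/*
 * On s390x, tb->cs_base is reused to carry the ex_value of a pending
 * EXECUTE instruction, i.e. the to-be-executed insn word.  A non-zero
 * ex_value makes s390x_tr_translate_insn below end the TB after a
 * single instruction.
 */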
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Emit an insn_start to accompany the breakpoint exception.
     * The ILEN value is a dummy, since this does not result in
     * an s390x exception, but an internal qemu exception which
     * brings us back to interact with the gdbstub.
     */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) for it to be properly cleared --
       thus we increment the PC here so that the logic setting
       tb->size does the right thing. */
    dc->base.pc_next += 2;
    return true;
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);

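    /*
     * Stop translation once the next insn would start on a new page
     * (a TB must not cross a page boundary, so that page-level
     * invalidation catches every affected TB) and likewise while an
     * EXECUTE is in flight.
     */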
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

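/*
 * At TB end, the DISAS_* value tells us how much CPU state is already
 * up to date: DISAS_GOTO_TB and DISAS_NORETURN need nothing further,
 * while the other values must first flush psw.addr and/or cc_op back
 * to env, then leave the TB via a debug exception, a full exit_tb(),
 * or a chainable lookup_and_goto_ptr().
 */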
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory. */
        qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start = s390x_tr_tb_start,
    .insn_start = s390x_tr_insn_start,
    .breakpoint_check = s390x_tr_breakpoint_check,
    .translate_insn = s390x_tr_translate_insn,
    .tb_stop = s390x_tr_tb_stop,
    .disas_log = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}

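/*
 * The data[] slots below mirror the three arguments of the
 * tcg_gen_insn_start() calls above: data[0] is the PSW address,
 * data[1] the cc_op, and data[2] the instruction length.
 */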
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date. */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];
}