/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
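
/*
 * The g1/g2 flags mark comparison operands that alias global TCG
 * values (such as cc_op or a guest register); free_compare() must
 * not free those.
 */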

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
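
/*
 * Compute the link-register value for branch-and-save style insns:
 * the full address in 64-bit mode, the address with the high-order
 * bit set in 31-bit mode, the bare address in 24-bit mode.  Outside
 * of 64-bit mode only the low 32 bits of the output are replaced.
 */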
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
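
/*
 * Select the TCG MMU index for memory accesses: user-only code always
 * uses MMU_USER_IDX, real mode is used while DAT is off, and otherwise
 * the PSW address-space-control bits pick the primary, secondary or
 * home space.
 */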
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
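
/*
 * Compute a base(index, displacement) effective address into a new
 * temporary, wrapping the result to 31 or 24 bits when not in 64-bit
 * addressing mode.  Register 0 never participates in address
 * generation, hence the checks for b2/x2 being non-zero.
 */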
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed. If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}
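
/*
 * Direct TB chaining is only safe while execution stays on the same
 * guest page (checked against both the TB start and the current
 * instruction), and never when use_exit_tb() forces a return to the
 * main loop.
 */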
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
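
/*
 * Emit a conditional branch whose condition has already been evaluated
 * into *c.  Three strategies are used, in decreasing order of
 * preference: goto_tb on both edges, goto_tb on the fallthru edge
 * only, or a movcond on psw_addr followed by a dynamic exit.
 */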
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
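
/*
 * For BAL in 24-bit mode the link information is not just an address:
 * the low word packs the instruction-length code, the condition code
 * and the program mask above the 24-bit return address, which is why
 * the CC has to be materialized first.
 */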
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
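
/*
 * Extract and validate the m3/m4 rounding-mode fields, packing both
 * into one constant for the helpers (m4 in bits 4-7).  Fields that
 * predate the floating-point-extension facility read as 0, and an
 * invalid rounding mode raises a specification exception (signalled
 * by a NULL return).
 */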
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s->fields, m3);
    uint8_t m4 = get_field(s->fields, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
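
/*
 * CHECKSUM: the helper returns the number of bytes it consumed in
 * "len"; the second-operand address in R2 is advanced by that many
 * bytes and the length in R2 + 1 reduced accordingly.
 */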
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
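
/*
 * COMPARE LOGICAL.  Operand lengths of 1, 2, 4 and 8 bytes are
 * compared inline; any other length goes through the helper.
 */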
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}

static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}

static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}

static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}

2266 #ifndef CONFIG_USER_ONLY
2267 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2269 MemOp mop = s->insn->data;
2270 TCGv_i64 addr, old, cc;
2271 TCGLabel *lab = gen_new_label();
2273 /* Note that in1 = R1 (zero-extended expected value),
2274 out = R1 (original reg), out2 = R1+1 (new value). */
2276 addr = tcg_temp_new_i64();
2277 old = tcg_temp_new_i64();
2278 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2279 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2280 get_mem_index(s), mop | MO_ALIGN);
2281 tcg_temp_free_i64(addr);
2283 /* Are the memory and expected values (un)equal? */
2284 cc = tcg_temp_new_i64();
2285 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2286 tcg_gen_extrl_i64_i32(cc_op, cc);
2288 /* Write back the output now, so that it happens before the
2289 following branch, so that we don't need local temps. */
2290 if ((mop & MO_SIZE) == MO_32) {
2291 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2292 } else {
2293 tcg_gen_mov_i64(o->out, old);
2295 tcg_temp_free_i64(old);
2297 /* If the comparison was equal, and the LSB of R2 was set,
2298 then we need to flush the TLB (for all cpus). */
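/* Concretely: cc is 1 on mismatch, so after the xori it is 1 exactly
   on a match; AND-ing that with R2 leaves a nonzero value only when
   the compare matched and R2's low bit was set, and only then do we
   fall through to the purge helper. */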
2299 tcg_gen_xori_i64(cc, cc, 1);
2300 tcg_gen_and_i64(cc, cc, o->in2);
2301 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2302 tcg_temp_free_i64(cc);
2304 gen_helper_purge(cpu_env);
2305 gen_set_label(lab);
2307 return DISAS_NEXT;
2309 #endif
2311 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2313 TCGv_i64 t1 = tcg_temp_new_i64();
2314 TCGv_i32 t2 = tcg_temp_new_i32();
2315 tcg_gen_extrl_i64_i32(t2, o->in1);
2316 gen_helper_cvd(t1, t2);
2317 tcg_temp_free_i32(t2);
2318 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2319 tcg_temp_free_i64(t1);
2320 return DISAS_NEXT;
2323 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2325 int m3 = get_field(s->fields, m3);
2326 TCGLabel *lab = gen_new_label();
2327 TCGCond c;
2329 c = tcg_invert_cond(ltgt_cond[m3]);
2330 if (s->insn->data) {
2331 c = tcg_unsigned_cond(c);
2333 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2335 /* Trap. */
2336 gen_trap(s);
2338 gen_set_label(lab);
2339 return DISAS_NEXT;
2342 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2344 int m3 = get_field(s->fields, m3);
2345 int r1 = get_field(s->fields, r1);
2346 int r2 = get_field(s->fields, r2);
2347 TCGv_i32 tr1, tr2, chk;
2349 /* R1 and R2 must both be even. */
2350 if ((r1 | r2) & 1) {
2351 gen_program_exception(s, PGM_SPECIFICATION);
2352 return DISAS_NORETURN;
2354 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2355 m3 = 0;
2358 tr1 = tcg_const_i32(r1);
2359 tr2 = tcg_const_i32(r2);
2360 chk = tcg_const_i32(m3);
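/* s->insn->data names the conversion by UTF widths: 12 = CU12
   (UTF-8 to UTF-16), 14 = CU14 (UTF-8 to UTF-32), 21 = CU21
   (UTF-16 to UTF-8), 24 = CU24 (UTF-16 to UTF-32), 41 = CU41
   (UTF-32 to UTF-8), 42 = CU42 (UTF-32 to UTF-16). */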
2362 switch (s->insn->data) {
2363 case 12:
2364 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2365 break;
2366 case 14:
2367 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2368 break;
2369 case 21:
2370 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2371 break;
2372 case 24:
2373 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2374 break;
2375 case 41:
2376 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2377 break;
2378 case 42:
2379 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2380 break;
2381 default:
2382 g_assert_not_reached();
2383 }
2385 tcg_temp_free_i32(tr1);
2386 tcg_temp_free_i32(tr2);
2387 tcg_temp_free_i32(chk);
2388 set_cc_static(s);
2389 return DISAS_NEXT;
2392 #ifndef CONFIG_USER_ONLY
2393 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2395 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2396 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2397 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2399 gen_helper_diag(cpu_env, r1, r3, func_code);
2401 tcg_temp_free_i32(func_code);
2402 tcg_temp_free_i32(r3);
2403 tcg_temp_free_i32(r1);
2404 return DISAS_NEXT;
2406 #endif
2408 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2410 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2411 return_low128(o->out);
2412 return DISAS_NEXT;
2415 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2417 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2418 return_low128(o->out);
2419 return DISAS_NEXT;
2422 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2424 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2425 return_low128(o->out);
2426 return DISAS_NEXT;
2429 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
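/* Unlike the 32-bit forms above, this takes the full 128-bit dividend
   from out (high half) and out2 (low half); presumably the helper
   returns the quotient while the remainder comes back through the
   128-bit return path picked up by return_low128(). */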
2431 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2432 return_low128(o->out);
2433 return DISAS_NEXT;
2436 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2438 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2439 return DISAS_NEXT;
2442 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2444 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2445 return DISAS_NEXT;
2448 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2450 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2451 return_low128(o->out2);
2452 return DISAS_NEXT;
2455 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2457 int r2 = get_field(s->fields, r2);
2458 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2459 return DISAS_NEXT;
2462 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2464 /* No cache information provided. */
2465 tcg_gen_movi_i64(o->out, -1);
2466 return DISAS_NEXT;
2469 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2471 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2472 return DISAS_NEXT;
2475 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2477 int r1 = get_field(s->fields, r1);
2478 int r2 = get_field(s->fields, r2);
2479 TCGv_i64 t = tcg_temp_new_i64();
2481 /* Note the "subsequently" in the PoO, which implies a defined result
2482 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2483 tcg_gen_shri_i64(t, psw_mask, 32);
2484 store_reg32_i64(r1, t);
2485 if (r2 != 0) {
2486 store_reg32_i64(r2, psw_mask);
2489 tcg_temp_free_i64(t);
2490 return DISAS_NEXT;
2493 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2495 int r1 = get_field(s->fields, r1);
2496 TCGv_i32 ilen;
2497 TCGv_i64 v1;
2499 /* Nested EXECUTE is not allowed. */
2500 if (unlikely(s->ex_value)) {
2501 gen_program_exception(s, PGM_EXECUTE);
2502 return DISAS_NORETURN;
2505 update_psw_addr(s);
2506 update_cc_op(s);
2508 if (r1 == 0) {
2509 v1 = tcg_const_i64(0);
2510 } else {
2511 v1 = regs[r1];
2514 ilen = tcg_const_i32(s->ilen);
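/* The helper fetches the target instruction and, per the
   architecture, ORs its bits 8-15 with the low byte of R1 (when
   r1 != 0), then parks the result in env->ex_value; the
   DISAS_PC_CC_UPDATED return forces a retranslation that picks it
   up via s->ex_value. */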
2515 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2516 tcg_temp_free_i32(ilen);
2518 if (r1 == 0) {
2519 tcg_temp_free_i64(v1);
2522 return DISAS_PC_CC_UPDATED;
2525 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2527 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2529 if (!m34) {
2530 return DISAS_NORETURN;
2532 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2533 tcg_temp_free_i32(m34);
2534 return DISAS_NEXT;
2537 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2539 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2541 if (!m34) {
2542 return DISAS_NORETURN;
2544 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2545 tcg_temp_free_i32(m34);
2546 return DISAS_NEXT;
2549 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2551 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2553 if (!m34) {
2554 return DISAS_NORETURN;
2556 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2557 return_low128(o->out2);
2558 tcg_temp_free_i32(m34);
2559 return DISAS_NEXT;
2562 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2564 /* We'll use the original input for cc computation, since we get to
2565 compare that against 0, which ought to be better than comparing
2566 the real output against 64. It also lets cc_dst be a convenient
2567 temporary during our computation. */
2568 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2570 /* R1 = IN ? CLZ(IN) : 64. */
2571 tcg_gen_clzi_i64(o->out, o->in2, 64);
2573 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2574 value by 64, which is undefined. But since the shift is 64 iff the
2575 input is zero, we still get the correct result after and'ing. */
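/* Worked example: for IN = 0x0010800000000000, CLZ = 11, so R1 = 11;
   0x8000000000000000 >> 11 recovers the found bit 0x0010000000000000
   and the andc leaves R1+1 = 0x0000800000000000. */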
2576 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2577 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2578 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2579 return DISAS_NEXT;
2582 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2584 int m3 = get_field(s->fields, m3);
2585 int pos, len, base = s->insn->data;
2586 TCGv_i64 tmp = tcg_temp_new_i64();
2587 uint64_t ccm;
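/* m3 is a left-to-right byte mask. E.g. m3 = 0x6 (0110) selects the
   two middle bytes: the 16-bit fast path below computes
   pos = base + ctz32(6) * 8 = base + 8 and len = 16, depositing the
   halfword at bits 8..23 of the 32-bit field. */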
2589 switch (m3) {
2590 case 0xf:
2591 /* Effectively a 32-bit load. */
2592 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2593 len = 32;
2594 goto one_insert;
2596 case 0xc:
2597 case 0x6:
2598 case 0x3:
2599 /* Effectively a 16-bit load. */
2600 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2601 len = 16;
2602 goto one_insert;
2604 case 0x8:
2605 case 0x4:
2606 case 0x2:
2607 case 0x1:
2608 /* Effectively an 8-bit load. */
2609 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2610 len = 8;
2611 goto one_insert;
2613 one_insert:
2614 pos = base + ctz32(m3) * 8;
2615 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2616 ccm = ((1ull << len) - 1) << pos;
2617 break;
2619 default:
2620 /* This is going to be a sequence of loads and inserts. */
2621 pos = base + 32 - 8;
2622 ccm = 0;
2623 while (m3) {
2624 if (m3 & 0x8) {
2625 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2626 tcg_gen_addi_i64(o->in2, o->in2, 1);
2627 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2628 ccm |= 0xffull << pos;
2629 }
2630 m3 = (m3 << 1) & 0xf;
2631 pos -= 8;
2632 }
2633 break;
2634 }
2636 tcg_gen_movi_i64(tmp, ccm);
2637 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2638 tcg_temp_free_i64(tmp);
2639 return DISAS_NEXT;
2642 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2644 int shift = s->insn->data & 0xff;
2645 int size = s->insn->data >> 8;
2646 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2647 return DISAS_NEXT;
2650 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2652 TCGv_i64 t1, t2;
2654 gen_op_calc_cc(s);
2655 t1 = tcg_temp_new_i64();
2656 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2657 t2 = tcg_temp_new_i64();
2658 tcg_gen_extu_i32_i64(t2, cc_op);
2659 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2660 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
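/* After the two deposits, bits 32-39 of R1 (left-numbered) hold
   00 | CC | program mask, with bits 32-33 zero as architected. */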
2661 tcg_temp_free_i64(t1);
2662 tcg_temp_free_i64(t2);
2663 return DISAS_NEXT;
2666 #ifndef CONFIG_USER_ONLY
2667 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2669 TCGv_i32 m4;
2671 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2672 m4 = tcg_const_i32(get_field(s->fields, m4));
2673 } else {
2674 m4 = tcg_const_i32(0);
2676 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2677 tcg_temp_free_i32(m4);
2678 return DISAS_NEXT;
2681 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2683 TCGv_i32 m4;
2685 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2686 m4 = tcg_const_i32(get_field(s->fields, m4));
2687 } else {
2688 m4 = tcg_const_i32(0);
2690 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2691 tcg_temp_free_i32(m4);
2692 return DISAS_NEXT;
2695 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2697 gen_helper_iske(o->out, cpu_env, o->in2);
2698 return DISAS_NEXT;
2700 #endif
2702 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2704 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2705 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2706 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2707 TCGv_i32 t_r1, t_r2, t_r3, type;
2709 switch (s->insn->data) {
2710 case S390_FEAT_TYPE_KMCTR:
2711 if (r3 & 1 || !r3) {
2712 gen_program_exception(s, PGM_SPECIFICATION);
2713 return DISAS_NORETURN;
2715 /* FALL THROUGH */
2716 case S390_FEAT_TYPE_PPNO:
2717 case S390_FEAT_TYPE_KMF:
2718 case S390_FEAT_TYPE_KMC:
2719 case S390_FEAT_TYPE_KMO:
2720 case S390_FEAT_TYPE_KM:
2721 if (r1 & 1 || !r1) {
2722 gen_program_exception(s, PGM_SPECIFICATION);
2723 return DISAS_NORETURN;
2725 /* FALL THROUGH */
2726 case S390_FEAT_TYPE_KMAC:
2727 case S390_FEAT_TYPE_KIMD:
2728 case S390_FEAT_TYPE_KLMD:
2729 if (r2 & 1 || !r2) {
2730 gen_program_exception(s, PGM_SPECIFICATION);
2731 return DISAS_NORETURN;
2733 /* FALL THROUGH */
2734 case S390_FEAT_TYPE_PCKMO:
2735 case S390_FEAT_TYPE_PCC:
2736 break;
2737 default:
2738 g_assert_not_reached();
2739 }
2741 t_r1 = tcg_const_i32(r1);
2742 t_r2 = tcg_const_i32(r2);
2743 t_r3 = tcg_const_i32(r3);
2744 type = tcg_const_i32(s->insn->data);
2745 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2746 set_cc_static(s);
2747 tcg_temp_free_i32(t_r1);
2748 tcg_temp_free_i32(t_r2);
2749 tcg_temp_free_i32(t_r3);
2750 tcg_temp_free_i32(type);
2751 return DISAS_NEXT;
2754 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2756 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2757 set_cc_static(s);
2758 return DISAS_NEXT;
2761 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2763 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2764 set_cc_static(s);
2765 return DISAS_NEXT;
2768 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2770 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2771 set_cc_static(s);
2772 return DISAS_NEXT;
2775 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2777 /* The real output is indeed the original value in memory;
2778 the atomic add below fetches it into in2. */
2779 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2780 s->insn->data | MO_ALIGN);
2781 /* However, we need to recompute the addition for setting CC. */
2782 tcg_gen_add_i64(o->out, o->in1, o->in2);
2783 return DISAS_NEXT;
2786 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2788 /* The real output is indeed the original value in memory;
2789 the atomic AND below fetches it into in2. */
2790 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2791 s->insn->data | MO_ALIGN);
2792 /* However, we need to recompute the operation for setting CC. */
2793 tcg_gen_and_i64(o->out, o->in1, o->in2);
2794 return DISAS_NEXT;
2797 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2799 /* The real output is indeed the original value in memory;
2800 the atomic OR below fetches it into in2. */
2801 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2802 s->insn->data | MO_ALIGN);
2803 /* However, we need to recompute the operation for setting CC. */
2804 tcg_gen_or_i64(o->out, o->in1, o->in2);
2805 return DISAS_NEXT;
2808 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2810 /* The real output is indeed the original value in memory;
2811 the atomic XOR below fetches it into in2. */
2812 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2813 s->insn->data | MO_ALIGN);
2814 /* However, we need to recompute the operation for setting CC. */
2815 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2816 return DISAS_NEXT;
2819 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2821 gen_helper_ldeb(o->out, cpu_env, o->in2);
2822 return DISAS_NEXT;
2825 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2827 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2829 if (!m34) {
2830 return DISAS_NORETURN;
2832 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2833 tcg_temp_free_i32(m34);
2834 return DISAS_NEXT;
2837 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2839 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2841 if (!m34) {
2842 return DISAS_NORETURN;
2844 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2845 tcg_temp_free_i32(m34);
2846 return DISAS_NEXT;
2849 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2851 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2853 if (!m34) {
2854 return DISAS_NORETURN;
2856 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2857 tcg_temp_free_i32(m34);
2858 return DISAS_NEXT;
2861 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2863 gen_helper_lxdb(o->out, cpu_env, o->in2);
2864 return_low128(o->out2);
2865 return DISAS_NEXT;
2868 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2870 gen_helper_lxeb(o->out, cpu_env, o->in2);
2871 return_low128(o->out2);
2872 return DISAS_NEXT;
2875 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2877 tcg_gen_shli_i64(o->out, o->in2, 32);
2878 return DISAS_NEXT;
2881 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2883 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2884 return DISAS_NEXT;
2887 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2889 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2890 return DISAS_NEXT;
2893 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2895 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2896 return DISAS_NEXT;
2899 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2901 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2902 return DISAS_NEXT;
2905 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2907 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2908 return DISAS_NEXT;
2911 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2913 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2914 return DISAS_NEXT;
2917 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2919 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2920 return DISAS_NEXT;
2923 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2925 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2926 return DISAS_NEXT;
2929 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2931 TCGLabel *lab = gen_new_label();
2932 store_reg32_i64(get_field(s->fields, r1), o->in2);
2933 /* The value is stored even in case of trap. */
2934 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2935 gen_trap(s);
2936 gen_set_label(lab);
2937 return DISAS_NEXT;
2940 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2942 TCGLabel *lab = gen_new_label();
2943 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2944 /* The value is stored even in case of trap. */
2945 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2946 gen_trap(s);
2947 gen_set_label(lab);
2948 return DISAS_NEXT;
2951 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2953 TCGLabel *lab = gen_new_label();
2954 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2955 /* The value is stored even in case of trap. */
2956 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2957 gen_trap(s);
2958 gen_set_label(lab);
2959 return DISAS_NEXT;
2962 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2964 TCGLabel *lab = gen_new_label();
2965 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2966 /* The value is stored even in case of trap. */
2967 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2968 gen_trap(s);
2969 gen_set_label(lab);
2970 return DISAS_NEXT;
2973 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2975 TCGLabel *lab = gen_new_label();
2976 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2977 /* The value is stored even in case of trap. */
2978 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2979 gen_trap(s);
2980 gen_set_label(lab);
2981 return DISAS_NEXT;
2984 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2986 DisasCompare c;
2988 disas_jcc(s, &c, get_field(s->fields, m3));
2990 if (c.is_64) {
2991 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2992 o->in2, o->in1);
2993 free_compare(&c);
2994 } else {
2995 TCGv_i32 t32 = tcg_temp_new_i32();
2996 TCGv_i64 t, z;
2998 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2999 free_compare(&c);
3001 t = tcg_temp_new_i64();
3002 tcg_gen_extu_i32_i64(t, t32);
3003 tcg_temp_free_i32(t32);
3005 z = tcg_const_i64(0);
3006 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3007 tcg_temp_free_i64(t);
3008 tcg_temp_free_i64(z);
3011 return DISAS_NEXT;
3014 #ifndef CONFIG_USER_ONLY
3015 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3017 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3018 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3019 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3020 tcg_temp_free_i32(r1);
3021 tcg_temp_free_i32(r3);
3022 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3023 return DISAS_PC_STALE_NOCHAIN;
3026 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3028 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3029 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3030 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3031 tcg_temp_free_i32(r1);
3032 tcg_temp_free_i32(r3);
3033 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3034 return DISAS_PC_STALE_NOCHAIN;
3037 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3039 gen_helper_lra(o->out, cpu_env, o->in2);
3040 set_cc_static(s);
3041 return DISAS_NEXT;
3044 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3046 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3047 return DISAS_NEXT;
3050 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3052 TCGv_i64 t1, t2;
3054 per_breaking_event(s);
3056 t1 = tcg_temp_new_i64();
3057 t2 = tcg_temp_new_i64();
3058 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3059 MO_TEUL | MO_ALIGN_8);
3060 tcg_gen_addi_i64(o->in2, o->in2, 4);
3061 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3062 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3063 tcg_gen_shli_i64(t1, t1, 32);
3064 gen_helper_load_psw(cpu_env, t1, t2);
3065 tcg_temp_free_i64(t1);
3066 tcg_temp_free_i64(t2);
3067 return DISAS_NORETURN;
3070 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3072 TCGv_i64 t1, t2;
3074 per_breaking_event(s);
3076 t1 = tcg_temp_new_i64();
3077 t2 = tcg_temp_new_i64();
3078 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3079 MO_TEQ | MO_ALIGN_8);
3080 tcg_gen_addi_i64(o->in2, o->in2, 8);
3081 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3082 gen_helper_load_psw(cpu_env, t1, t2);
3083 tcg_temp_free_i64(t1);
3084 tcg_temp_free_i64(t2);
3085 return DISAS_NORETURN;
3087 #endif
3089 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3091 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3092 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3093 gen_helper_lam(cpu_env, r1, o->in2, r3);
3094 tcg_temp_free_i32(r1);
3095 tcg_temp_free_i32(r3);
3096 return DISAS_NEXT;
3099 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3101 int r1 = get_field(s->fields, r1);
3102 int r3 = get_field(s->fields, r3);
3103 TCGv_i64 t1, t2;
3105 /* Only one register to read. */
3106 t1 = tcg_temp_new_i64();
3107 if (unlikely(r1 == r3)) {
3108 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3109 store_reg32_i64(r1, t1);
3110 tcg_temp_free(t1);
3111 return DISAS_NEXT;
3114 /* First load the values of the first and last registers to trigger
3115 possible page faults. */
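/* The (r3 - r1) & 15 arithmetic also covers wraparound:
   e.g. r1 = 14, r3 = 2 touches r14, r15, r0, r1, r2. */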
3116 t2 = tcg_temp_new_i64();
3117 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3118 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3119 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3120 store_reg32_i64(r1, t1);
3121 store_reg32_i64(r3, t2);
3123 /* Only two registers to read. */
3124 if (((r1 + 1) & 15) == r3) {
3125 tcg_temp_free(t2);
3126 tcg_temp_free(t1);
3127 return DISAS_NEXT;
3130 /* Then load the remaining registers. A page fault can no longer occur. */
3131 r3 = (r3 - 1) & 15;
3132 tcg_gen_movi_i64(t2, 4);
3133 while (r1 != r3) {
3134 r1 = (r1 + 1) & 15;
3135 tcg_gen_add_i64(o->in2, o->in2, t2);
3136 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3137 store_reg32_i64(r1, t1);
3139 tcg_temp_free(t2);
3140 tcg_temp_free(t1);
3142 return DISAS_NEXT;
3145 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3147 int r1 = get_field(s->fields, r1);
3148 int r3 = get_field(s->fields, r3);
3149 TCGv_i64 t1, t2;
3151 /* Only one register to read. */
3152 t1 = tcg_temp_new_i64();
3153 if (unlikely(r1 == r3)) {
3154 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3155 store_reg32h_i64(r1, t1);
3156 tcg_temp_free(t1);
3157 return DISAS_NEXT;
3160 /* First load the values of the first and last registers to trigger
3161 possible page faults. */
3162 t2 = tcg_temp_new_i64();
3163 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3164 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3165 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3166 store_reg32h_i64(r1, t1);
3167 store_reg32h_i64(r3, t2);
3169 /* Only two registers to read. */
3170 if (((r1 + 1) & 15) == r3) {
3171 tcg_temp_free(t2);
3172 tcg_temp_free(t1);
3173 return DISAS_NEXT;
3176 /* Then load the remaining registers. A page fault can no longer occur. */
3177 r3 = (r3 - 1) & 15;
3178 tcg_gen_movi_i64(t2, 4);
3179 while (r1 != r3) {
3180 r1 = (r1 + 1) & 15;
3181 tcg_gen_add_i64(o->in2, o->in2, t2);
3182 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3183 store_reg32h_i64(r1, t1);
3185 tcg_temp_free(t2);
3186 tcg_temp_free(t1);
3188 return DISAS_NEXT;
3191 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3193 int r1 = get_field(s->fields, r1);
3194 int r3 = get_field(s->fields, r3);
3195 TCGv_i64 t1, t2;
3197 /* Only one register to read. */
3198 if (unlikely(r1 == r3)) {
3199 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3200 return DISAS_NEXT;
3203 /* First load the values of the first and last registers to trigger
3204 possible page faults. */
3205 t1 = tcg_temp_new_i64();
3206 t2 = tcg_temp_new_i64();
3207 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3208 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3209 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3210 tcg_gen_mov_i64(regs[r1], t1);
3211 tcg_temp_free(t2);
3213 /* Only two registers to read. */
3214 if (((r1 + 1) & 15) == r3) {
3215 tcg_temp_free(t1);
3216 return DISAS_NEXT;
3219 /* Then load the remaining registers. A page fault can no longer occur. */
3220 r3 = (r3 - 1) & 15;
3221 tcg_gen_movi_i64(t1, 8);
3222 while (r1 != r3) {
3223 r1 = (r1 + 1) & 15;
3224 tcg_gen_add_i64(o->in2, o->in2, t1);
3225 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3227 tcg_temp_free(t1);
3229 return DISAS_NEXT;
3232 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3234 TCGv_i64 a1, a2;
3235 MemOp mop = s->insn->data;
3237 /* In a parallel context, stop the world and single step. */
3238 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3239 update_psw_addr(s);
3240 update_cc_op(s);
3241 gen_exception(EXCP_ATOMIC);
3242 return DISAS_NORETURN;
3245 /* In a serial context, perform the two loads ... */
3246 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3247 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3248 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3249 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3250 tcg_temp_free_i64(a1);
3251 tcg_temp_free_i64(a2);
3253 /* ... and indicate that we performed them while interlocked. */
3254 gen_op_movi_cc(s, 0);
3255 return DISAS_NEXT;
3258 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3260 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3261 gen_helper_lpq(o->out, cpu_env, o->in2);
3262 } else if (HAVE_ATOMIC128) {
3263 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3264 } else {
3265 gen_helper_exit_atomic(cpu_env);
3266 return DISAS_NORETURN;
3268 return_low128(o->out2);
3269 return DISAS_NEXT;
3272 #ifndef CONFIG_USER_ONLY
3273 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3275 gen_helper_lura(o->out, cpu_env, o->in2);
3276 return DISAS_NEXT;
3279 static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3281 gen_helper_lurag(o->out, cpu_env, o->in2);
3282 return DISAS_NEXT;
3284 #endif
3286 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3288 tcg_gen_andi_i64(o->out, o->in2, -256);
3289 return DISAS_NEXT;
3292 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3294 const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
3296 if (get_field(s->fields, m3) > 6) {
3297 gen_program_exception(s, PGM_SPECIFICATION);
3298 return DISAS_NORETURN;
3301 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3302 tcg_gen_neg_i64(o->addr1, o->addr1);
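/* The ori with -block_size followed by neg computes
   block_size - (addr % block_size), i.e. the distance to the next
   boundary: e.g. for address 0x1003 and a 64-byte block, 64 - 3 = 61,
   which the umin below then caps at 16. */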
3303 tcg_gen_movi_i64(o->out, 16);
3304 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3305 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3306 return DISAS_NEXT;
3309 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3311 o->out = o->in2;
3312 o->g_out = o->g_in2;
3313 o->in2 = NULL;
3314 o->g_in2 = false;
3315 return DISAS_NEXT;
3318 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3320 int b2 = get_field(s->fields, b2);
3321 TCGv ar1 = tcg_temp_new_i64();
3323 o->out = o->in2;
3324 o->g_out = o->g_in2;
3325 o->in2 = NULL;
3326 o->g_in2 = false;
3328 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3329 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3330 tcg_gen_movi_i64(ar1, 0);
3331 break;
3332 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3333 tcg_gen_movi_i64(ar1, 1);
3334 break;
3335 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3336 if (b2) {
3337 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3338 } else {
3339 tcg_gen_movi_i64(ar1, 0);
3341 break;
3342 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3343 tcg_gen_movi_i64(ar1, 2);
3344 break;
3345 }
3347 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3348 tcg_temp_free_i64(ar1);
3350 return DISAS_NEXT;
3353 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3355 o->out = o->in1;
3356 o->out2 = o->in2;
3357 o->g_out = o->g_in1;
3358 o->g_out2 = o->g_in2;
3359 o->in1 = NULL;
3360 o->in2 = NULL;
3361 o->g_in1 = o->g_in2 = false;
3362 return DISAS_NEXT;
3365 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3367 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3368 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3369 tcg_temp_free_i32(l);
3370 return DISAS_NEXT;
3373 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3375 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3376 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3377 tcg_temp_free_i32(l);
3378 return DISAS_NEXT;
3381 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3383 int r1 = get_field(s->fields, r1);
3384 int r2 = get_field(s->fields, r2);
3385 TCGv_i32 t1, t2;
3387 /* r1 and r2 must be even. */
3388 if (r1 & 1 || r2 & 1) {
3389 gen_program_exception(s, PGM_SPECIFICATION);
3390 return DISAS_NORETURN;
3393 t1 = tcg_const_i32(r1);
3394 t2 = tcg_const_i32(r2);
3395 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3396 tcg_temp_free_i32(t1);
3397 tcg_temp_free_i32(t2);
3398 set_cc_static(s);
3399 return DISAS_NEXT;
3402 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3404 int r1 = get_field(s->fields, r1);
3405 int r3 = get_field(s->fields, r3);
3406 TCGv_i32 t1, t3;
3408 /* r1 and r3 must be even. */
3409 if (r1 & 1 || r3 & 1) {
3410 gen_program_exception(s, PGM_SPECIFICATION);
3411 return DISAS_NORETURN;
3414 t1 = tcg_const_i32(r1);
3415 t3 = tcg_const_i32(r3);
3416 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3417 tcg_temp_free_i32(t1);
3418 tcg_temp_free_i32(t3);
3419 set_cc_static(s);
3420 return DISAS_NEXT;
3423 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3425 int r1 = get_field(s->fields, r1);
3426 int r3 = get_field(s->fields, r3);
3427 TCGv_i32 t1, t3;
3429 /* r1 and r3 must be even. */
3430 if (r1 & 1 || r3 & 1) {
3431 gen_program_exception(s, PGM_SPECIFICATION);
3432 return DISAS_NORETURN;
3435 t1 = tcg_const_i32(r1);
3436 t3 = tcg_const_i32(r3);
3437 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3438 tcg_temp_free_i32(t1);
3439 tcg_temp_free_i32(t3);
3440 set_cc_static(s);
3441 return DISAS_NEXT;
3444 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3446 int r3 = get_field(s->fields, r3);
3447 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3448 set_cc_static(s);
3449 return DISAS_NEXT;
3452 #ifndef CONFIG_USER_ONLY
3453 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3455 int r1 = get_field(s->fields, l1);
3456 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3457 set_cc_static(s);
3458 return DISAS_NEXT;
3461 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3463 int r1 = get_field(s->fields, l1);
3464 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3465 set_cc_static(s);
3466 return DISAS_NEXT;
3468 #endif
3470 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3472 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3473 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3474 tcg_temp_free_i32(l);
3475 return DISAS_NEXT;
3478 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3480 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3481 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3482 tcg_temp_free_i32(l);
3483 return DISAS_NEXT;
3486 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3488 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3489 set_cc_static(s);
3490 return DISAS_NEXT;
3493 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3495 TCGv_i32 t1 = tcg_const_i32(get_field(s->fields, r1));
3496 TCGv_i32 t2 = tcg_const_i32(get_field(s->fields, r2));
3498 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3499 tcg_temp_free_i32(t1);
3500 tcg_temp_free_i32(t2);
3501 set_cc_static(s);
3502 return DISAS_NEXT;
3505 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3507 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3508 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3509 tcg_temp_free_i32(l);
3510 return DISAS_NEXT;
3513 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3515 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3516 return DISAS_NEXT;
3519 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3521 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3522 return DISAS_NEXT;
3525 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3527 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3528 return DISAS_NEXT;
3531 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3533 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3534 return DISAS_NEXT;
3537 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3539 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3540 return DISAS_NEXT;
3543 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3545 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3546 return_low128(o->out2);
3547 return DISAS_NEXT;
3550 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3552 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3553 return_low128(o->out2);
3554 return DISAS_NEXT;
3557 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3559 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3560 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3561 tcg_temp_free_i64(r3);
3562 return DISAS_NEXT;
3565 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3567 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3568 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3569 tcg_temp_free_i64(r3);
3570 return DISAS_NEXT;
3573 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3575 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3576 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3577 tcg_temp_free_i64(r3);
3578 return DISAS_NEXT;
3581 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3583 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3584 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3585 tcg_temp_free_i64(r3);
3586 return DISAS_NEXT;
3589 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3591 TCGv_i64 z, n;
3592 z = tcg_const_i64(0);
3593 n = tcg_temp_new_i64();
3594 tcg_gen_neg_i64(n, o->in2);
3595 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3596 tcg_temp_free_i64(n);
3597 tcg_temp_free_i64(z);
3598 return DISAS_NEXT;
3601 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3603 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3604 return DISAS_NEXT;
3607 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3609 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3610 return DISAS_NEXT;
3613 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3615 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3616 tcg_gen_mov_i64(o->out2, o->in2);
3617 return DISAS_NEXT;
3620 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3622 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3623 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3624 tcg_temp_free_i32(l);
3625 set_cc_static(s);
3626 return DISAS_NEXT;
3629 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3631 tcg_gen_neg_i64(o->out, o->in2);
3632 return DISAS_NEXT;
3635 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3637 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3638 return DISAS_NEXT;
3641 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3643 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3644 return DISAS_NEXT;
3647 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3649 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3650 tcg_gen_mov_i64(o->out2, o->in2);
3651 return DISAS_NEXT;
3654 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3656 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3657 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3658 tcg_temp_free_i32(l);
3659 set_cc_static(s);
3660 return DISAS_NEXT;
3663 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3665 tcg_gen_or_i64(o->out, o->in1, o->in2);
3666 return DISAS_NEXT;
3669 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3671 int shift = s->insn->data & 0xff;
3672 int size = s->insn->data >> 8;
3673 uint64_t mask = ((1ull << size) - 1) << shift;
3675 assert(!o->g_in2);
3676 tcg_gen_shli_i64(o->in2, o->in2, shift);
3677 tcg_gen_or_i64(o->out, o->in1, o->in2);
3679 /* Produce the CC from only the bits manipulated. */
3680 tcg_gen_andi_i64(cc_dst, o->out, mask);
3681 set_cc_nz_u64(s, cc_dst);
3682 return DISAS_NEXT;
3685 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3687 o->in1 = tcg_temp_new_i64();
3689 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3690 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3691 } else {
3692 /* Perform the atomic operation in memory. */
3693 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3694 s->insn->data);
3697 /* Recompute also for atomic case: needed for setting CC. */
3698 tcg_gen_or_i64(o->out, o->in1, o->in2);
3700 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3701 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3703 return DISAS_NEXT;
3706 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3708 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3709 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3710 tcg_temp_free_i32(l);
3711 return DISAS_NEXT;
3714 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3716 int l2 = get_field(s->fields, l2) + 1;
3717 TCGv_i32 l;
3719 /* The length must not exceed 32 bytes. */
3720 if (l2 > 32) {
3721 gen_program_exception(s, PGM_SPECIFICATION);
3722 return DISAS_NORETURN;
3724 l = tcg_const_i32(l2);
3725 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3726 tcg_temp_free_i32(l);
3727 return DISAS_NEXT;
3730 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3732 int l2 = get_field(s->fields, l2) + 1;
3733 TCGv_i32 l;
3735 /* The length must be even and should not exceed 64 bytes. */
3736 if ((l2 & 1) || (l2 > 64)) {
3737 gen_program_exception(s, PGM_SPECIFICATION);
3738 return DISAS_NORETURN;
3740 l = tcg_const_i32(l2);
3741 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3742 tcg_temp_free_i32(l);
3743 return DISAS_NEXT;
3746 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3748 gen_helper_popcnt(o->out, o->in2);
3749 return DISAS_NEXT;
3752 #ifndef CONFIG_USER_ONLY
3753 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3755 gen_helper_ptlb(cpu_env);
3756 return DISAS_NEXT;
3758 #endif
3760 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3762 int i3 = get_field(s->fields, i3);
3763 int i4 = get_field(s->fields, i4);
3764 int i5 = get_field(s->fields, i5);
3765 int do_zero = i4 & 0x80;
3766 uint64_t mask, imask, pmask;
3767 int pos, len, rot;
3769 /* Adjust the arguments for the specific insn. */
3770 switch (s->fields->op2) {
3771 case 0x55: /* risbg */
3772 case 0x59: /* risbgn */
3773 i3 &= 63;
3774 i4 &= 63;
3775 pmask = ~0;
3776 break;
3777 case 0x5d: /* risbhg */
3778 i3 &= 31;
3779 i4 &= 31;
3780 pmask = 0xffffffff00000000ull;
3781 break;
3782 case 0x51: /* risblg */
3783 i3 &= 31;
3784 i4 &= 31;
3785 pmask = 0x00000000ffffffffull;
3786 break;
3787 default:
3788 g_assert_not_reached();
3789 }
3791 /* MASK is the set of bits to be inserted from R2.
3792 Take care for I3/I4 wraparound. */
3793 mask = pmask >> i3;
3794 if (i3 <= i4) {
3795 mask ^= pmask >> i4 >> 1;
3796 } else {
3797 mask |= ~(pmask >> i4 >> 1);
3799 mask &= pmask;
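/* E.g. risbg with i3 = 40, i4 = 47: mask = (~0 >> 40) ^ (~0 >> 48)
   = 0x0000000000ff0000, selecting left-numbered bits 40..47. */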
3801 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3802 insns, we need to keep the other half of the register. */
3803 imask = ~mask | ~pmask;
3804 if (do_zero) {
3805 imask = ~pmask;
3808 len = i4 - i3 + 1;
3809 pos = 63 - i4;
3810 rot = i5 & 63;
3811 if (s->fields->op2 == 0x5d) {
3812 pos += 32;
3815 /* In some cases we can implement this with extract. */
3816 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3817 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3818 return DISAS_NEXT;
3821 /* In some cases we can implement this with deposit. */
3822 if (len > 0 && (imask == 0 || ~mask == imask)) {
3823 /* Note that we rotate the bits to be inserted to the lsb, not to
3824 the position as described in the PoO. */
3825 rot = (rot - pos) & 63;
3826 } else {
3827 pos = -1;
3830 /* Rotate the input as necessary. */
3831 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3833 /* Insert the selected bits into the output. */
3834 if (pos >= 0) {
3835 if (imask == 0) {
3836 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3837 } else {
3838 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3840 } else if (imask == 0) {
3841 tcg_gen_andi_i64(o->out, o->in2, mask);
3842 } else {
3843 tcg_gen_andi_i64(o->in2, o->in2, mask);
3844 tcg_gen_andi_i64(o->out, o->out, imask);
3845 tcg_gen_or_i64(o->out, o->out, o->in2);
3847 return DISAS_NEXT;
3850 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3852 int i3 = get_field(s->fields, i3);
3853 int i4 = get_field(s->fields, i4);
3854 int i5 = get_field(s->fields, i5);
3855 uint64_t mask;
3857 /* If this is a test-only form, arrange to discard the result. */
3858 if (i3 & 0x80) {
3859 o->out = tcg_temp_new_i64();
3860 o->g_out = false;
3863 i3 &= 63;
3864 i4 &= 63;
3865 i5 &= 63;
3867 /* MASK is the set of bits to be operated on from R2.
3868 Take care for I3/I4 wraparound. */
3869 mask = ~0ull >> i3;
3870 if (i3 <= i4) {
3871 mask ^= ~0ull >> i4 >> 1;
3872 } else {
3873 mask |= ~(~0ull >> i4 >> 1);
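/* E.g. the wraparound case i3 = 60, i4 = 3 yields
   mask = 0xf00000000000000f, selecting left-numbered bits 60..63
   and 0..3. */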
3876 /* Rotate the input as necessary. */
3877 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3879 /* Operate. */
3880 switch (s->fields->op2) {
3881 case 0x55: /* AND */
3882 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3883 tcg_gen_and_i64(o->out, o->out, o->in2);
3884 break;
3885 case 0x56: /* OR */
3886 tcg_gen_andi_i64(o->in2, o->in2, mask);
3887 tcg_gen_or_i64(o->out, o->out, o->in2);
3888 break;
3889 case 0x57: /* XOR */
3890 tcg_gen_andi_i64(o->in2, o->in2, mask);
3891 tcg_gen_xor_i64(o->out, o->out, o->in2);
3892 break;
3893 default:
3894 abort();
3895 }
3897 /* Set the CC. */
3898 tcg_gen_andi_i64(cc_dst, o->out, mask);
3899 set_cc_nz_u64(s, cc_dst);
3900 return DISAS_NEXT;
3903 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3905 tcg_gen_bswap16_i64(o->out, o->in2);
3906 return DISAS_NEXT;
3909 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3911 tcg_gen_bswap32_i64(o->out, o->in2);
3912 return DISAS_NEXT;
3915 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3917 tcg_gen_bswap64_i64(o->out, o->in2);
3918 return DISAS_NEXT;
3921 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3923 TCGv_i32 t1 = tcg_temp_new_i32();
3924 TCGv_i32 t2 = tcg_temp_new_i32();
3925 TCGv_i32 to = tcg_temp_new_i32();
3926 tcg_gen_extrl_i64_i32(t1, o->in1);
3927 tcg_gen_extrl_i64_i32(t2, o->in2);
3928 tcg_gen_rotl_i32(to, t1, t2);
3929 tcg_gen_extu_i32_i64(o->out, to);
3930 tcg_temp_free_i32(t1);
3931 tcg_temp_free_i32(t2);
3932 tcg_temp_free_i32(to);
3933 return DISAS_NEXT;
3936 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3938 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3939 return DISAS_NEXT;
3942 #ifndef CONFIG_USER_ONLY
3943 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3945 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3946 set_cc_static(s);
3947 return DISAS_NEXT;
3950 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3952 gen_helper_sacf(cpu_env, o->in2);
3953 /* The address-space-control mode has changed, so end the block. */
3954 return DISAS_PC_STALE;
3956 #endif
3958 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3960 int sam = s->insn->data;
3961 TCGv_i64 tsam;
3962 uint64_t mask;
3964 switch (sam) {
3965 case 0:
3966 mask = 0xffffff;
3967 break;
3968 case 1:
3969 mask = 0x7fffffff;
3970 break;
3971 default:
3972 mask = -1;
3973 break;
3974 }
3976 /* Bizarre but true, we check the address of the current insn for the
3977 specification exception, not the next to be executed. Thus the PoO
3978 documents that Bad Things Happen two bytes before the end. */
3979 if (s->base.pc_next & ~mask) {
3980 gen_program_exception(s, PGM_SPECIFICATION);
3981 return DISAS_NORETURN;
3983 s->pc_tmp &= mask;
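/* sam encodes the new mode: 0 = 24-bit, 1 = 31-bit, 3 = 64-bit;
   the two-bit deposit below updates the PSW EA/BA addressing-mode
   bits (PSW bits 31 and 32). */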
3985 tsam = tcg_const_i64(sam);
3986 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3987 tcg_temp_free_i64(tsam);
3989 /* Always exit the TB, since we (may have) changed execution mode. */
3990 return DISAS_PC_STALE;
3993 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3995 int r1 = get_field(s->fields, r1);
3996 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3997 return DISAS_NEXT;
4000 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4002 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4003 return DISAS_NEXT;
4006 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4008 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4009 return DISAS_NEXT;
4012 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4014 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4015 return_low128(o->out2);
4016 return DISAS_NEXT;
4019 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4021 gen_helper_sqeb(o->out, cpu_env, o->in2);
4022 return DISAS_NEXT;
4025 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4027 gen_helper_sqdb(o->out, cpu_env, o->in2);
4028 return DISAS_NEXT;
4031 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4033 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4034 return_low128(o->out2);
4035 return DISAS_NEXT;
4038 #ifndef CONFIG_USER_ONLY
4039 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4041 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4042 set_cc_static(s);
4043 return DISAS_NEXT;
4046 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4048 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4049 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4050 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4051 set_cc_static(s);
4052 tcg_temp_free_i32(r1);
4053 tcg_temp_free_i32(r3);
4054 return DISAS_NEXT;
4056 #endif
4058 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4060 DisasCompare c;
4061 TCGv_i64 a, h;
4062 TCGLabel *lab;
4063 int r1;
4065 disas_jcc(s, &c, get_field(s->fields, m3));
4067 /* We want to store when the condition is fulfilled, so branch
4068 out when it is not. */
4069 c.cond = tcg_invert_cond(c.cond);
4071 lab = gen_new_label();
4072 if (c.is_64) {
4073 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4074 } else {
4075 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4077 free_compare(&c);
4079 r1 = get_field(s->fields, r1);
4080 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
4081 switch (s->insn->data) {
4082 case 1: /* STOCG */
4083 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4084 break;
4085 case 0: /* STOC */
4086 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4087 break;
4088 case 2: /* STOCFH */
4089 h = tcg_temp_new_i64();
4090 tcg_gen_shri_i64(h, regs[r1], 32);
4091 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4092 tcg_temp_free_i64(h);
4093 break;
4094 default:
4095 g_assert_not_reached();
4096 }
4097 tcg_temp_free_i64(a);
4099 gen_set_label(lab);
4100 return DISAS_NEXT;
4103 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4105 uint64_t sign = 1ull << s->insn->data;
4106 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4107 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4108 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4109 /* The arithmetic left shift is curious in that it does not affect
4110 the sign bit. Copy that over from the source unchanged. */
4111 tcg_gen_andi_i64(o->out, o->out, ~sign);
4112 tcg_gen_andi_i64(o->in1, o->in1, sign);
4113 tcg_gen_or_i64(o->out, o->out, o->in1);
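/* E.g. a 32-bit SLA of 0x80000001 by 1 yields 0x80000002 in the low
   word: every bit shifts except bit 31, which is copied from the
   source. */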
4114 return DISAS_NEXT;
4117 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4119 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4120 return DISAS_NEXT;
4123 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4125 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4126 return DISAS_NEXT;
4129 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4131 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4132 return DISAS_NEXT;
4135 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4137 gen_helper_sfpc(cpu_env, o->in2);
4138 return DISAS_NEXT;
4141 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4143 gen_helper_sfas(cpu_env, o->in2);
4144 return DISAS_NEXT;
4147 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4149 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4150 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4151 gen_helper_srnm(cpu_env, o->addr1);
4152 return DISAS_NEXT;
4155 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4157 /* Bits 0-55 are ignored. */
4158 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4159 gen_helper_srnm(cpu_env, o->addr1);
4160 return DISAS_NEXT;
4163 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4165 TCGv_i64 tmp = tcg_temp_new_i64();
4167 /* Bits other than 61-63 are ignored. */
4168 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4170 /* No need to call a helper, we don't implement DFP. */
4171 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4172 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4173 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4175 tcg_temp_free_i64(tmp);
4176 return DISAS_NEXT;
4179 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4181 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4182 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4183 set_cc_static(s);
4185 tcg_gen_shri_i64(o->in1, o->in1, 24);
4186 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4187 return DISAS_NEXT;
4190 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4192 int b1 = get_field(s->fields, b1);
4193 int d1 = get_field(s->fields, d1);
4194 int b2 = get_field(s->fields, b2);
4195 int d2 = get_field(s->fields, d2);
4196 int r3 = get_field(s->fields, r3);
4197 TCGv_i64 tmp = tcg_temp_new_i64();
4199 /* fetch all operands first */
4200 o->in1 = tcg_temp_new_i64();
4201 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4202 o->in2 = tcg_temp_new_i64();
4203 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4204 o->addr1 = get_address(s, 0, r3, 0);
4206 /* load the third operand into r3 before modifying anything */
4207 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4209 /* subtract CPU timer from first operand and store in GR0 */
4210 gen_helper_stpt(tmp, cpu_env);
4211 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4213 /* store second operand in GR1 */
4214 tcg_gen_mov_i64(regs[1], o->in2);
4216 tcg_temp_free_i64(tmp);
4217 return DISAS_NEXT;
4220 #ifndef CONFIG_USER_ONLY
4221 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4223 tcg_gen_shri_i64(o->in2, o->in2, 4);
4224 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4225 return DISAS_NEXT;
4228 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4230 gen_helper_sske(cpu_env, o->in1, o->in2);
4231 return DISAS_NEXT;
4234 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4236 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4237 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4238 return DISAS_PC_STALE_NOCHAIN;
4241 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4243 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4244 return DISAS_NEXT;
4246 #endif
4248 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4250 gen_helper_stck(o->out, cpu_env);
4251 /* ??? We don't implement clock states. */
4252 gen_op_movi_cc(s, 0);
4253 return DISAS_NEXT;
4256 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4258 TCGv_i64 c1 = tcg_temp_new_i64();
4259 TCGv_i64 c2 = tcg_temp_new_i64();
4260 TCGv_i64 todpr = tcg_temp_new_i64();
4261 gen_helper_stck(c1, cpu_env);
4262 /* 16-bit value stored in a uint32_t (only valid bits set) */
4263 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4264 /* Shift the 64-bit value into its place as a zero-extended
4265 104-bit value. Note that "bit positions 64-103 are always
4266 non-zero so that they compare differently to STCK"; we set
4267 the least significant bit to 1. */
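/* Resulting 16-byte layout: bytes 0-7 hold TOD >> 8 (a zero epoch
   byte followed by TOD bits 0-55); bytes 8-15 hold
   (TOD << 56) | 0x10000 | TODPR, i.e. the low TOD byte, the
   guaranteed non-zero bit, and the programmable field in the last
   two bytes. */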
4268 tcg_gen_shli_i64(c2, c1, 56);
4269 tcg_gen_shri_i64(c1, c1, 8);
4270 tcg_gen_ori_i64(c2, c2, 0x10000);
4271 tcg_gen_or_i64(c2, c2, todpr);
4272 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4273 tcg_gen_addi_i64(o->in2, o->in2, 8);
4274 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4275 tcg_temp_free_i64(c1);
4276 tcg_temp_free_i64(c2);
4277 tcg_temp_free_i64(todpr);
4278 /* ??? We don't implement clock states. */
4279 gen_op_movi_cc(s, 0);
4280 return DISAS_NEXT;
4283 #ifndef CONFIG_USER_ONLY
4284 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4286 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4287 gen_helper_sck(cc_op, cpu_env, o->in1);
4288 set_cc_static(s);
4289 return DISAS_NEXT;
4292 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4294 gen_helper_sckc(cpu_env, o->in2);
4295 return DISAS_NEXT;
4298 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4300 gen_helper_sckpf(cpu_env, regs[0]);
4301 return DISAS_NEXT;
4304 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4306 gen_helper_stckc(o->out, cpu_env);
4307 return DISAS_NEXT;
4310 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4312 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4313 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4314 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4315 tcg_temp_free_i32(r1);
4316 tcg_temp_free_i32(r3);
4317 return DISAS_NEXT;
4320 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4322 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4323 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4324 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4325 tcg_temp_free_i32(r1);
4326 tcg_temp_free_i32(r3);
4327 return DISAS_NEXT;
4330 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4332 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4333 return DISAS_NEXT;
4336 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4338 gen_helper_spt(cpu_env, o->in2);
4339 return DISAS_NEXT;
4342 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4344 gen_helper_stfl(cpu_env);
4345 return DISAS_NEXT;
4348 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4350 gen_helper_stpt(o->out, cpu_env);
4351 return DISAS_NEXT;
4354 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4356 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4357 set_cc_static(s);
4358 return DISAS_NEXT;
4361 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4363 gen_helper_spx(cpu_env, o->in2);
4364 return DISAS_NEXT;
4367 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4369 gen_helper_xsch(cpu_env, regs[1]);
4370 set_cc_static(s);
4371 return DISAS_NEXT;
4374 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4376 gen_helper_csch(cpu_env, regs[1]);
4377 set_cc_static(s);
4378 return DISAS_NEXT;
4381 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4383 gen_helper_hsch(cpu_env, regs[1]);
4384 set_cc_static(s);
4385 return DISAS_NEXT;
4388 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4390 gen_helper_msch(cpu_env, regs[1], o->in2);
4391 set_cc_static(s);
4392 return DISAS_NEXT;
4395 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4397 gen_helper_rchp(cpu_env, regs[1]);
4398 set_cc_static(s);
4399 return DISAS_NEXT;
4402 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4404 gen_helper_rsch(cpu_env, regs[1]);
4405 set_cc_static(s);
4406 return DISAS_NEXT;
4409 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4411 gen_helper_sal(cpu_env, regs[1]);
4412 return DISAS_NEXT;
4415 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4417 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4418 return DISAS_NEXT;
4421 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4423 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4424 gen_op_movi_cc(s, 3);
4425 return DISAS_NEXT;
4428 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4430 /* The instruction is suppressed if not provided. */
4431 return DISAS_NEXT;
4434 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4436 gen_helper_ssch(cpu_env, regs[1], o->in2);
4437 set_cc_static(s);
4438 return DISAS_NEXT;
4441 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4443 gen_helper_stsch(cpu_env, regs[1], o->in2);
4444 set_cc_static(s);
4445 return DISAS_NEXT;
4448 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4450 gen_helper_stcrw(cpu_env, o->in2);
4451 set_cc_static(s);
4452 return DISAS_NEXT;
4455 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4457 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4458 set_cc_static(s);
4459 return DISAS_NEXT;
4462 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4464 gen_helper_tsch(cpu_env, regs[1], o->in2);
4465 set_cc_static(s);
4466 return DISAS_NEXT;
4469 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4471 gen_helper_chsc(cpu_env, o->in2);
4472 set_cc_static(s);
4473 return DISAS_NEXT;
4476 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4478 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4479 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4480 return DISAS_NEXT;
4483 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4485 uint64_t i2 = get_field(s->fields, i2);
4486 TCGv_i64 t;
4488 /* It is important to do what the instruction name says: STORE THEN.
4489 If we let the output hook perform the store, then if we fault and
4490 restart, we'll have the wrong SYSTEM MASK in place. */
4491 t = tcg_temp_new_i64();
4492 tcg_gen_shri_i64(t, psw_mask, 56);
4493 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4494 tcg_temp_free_i64(t);
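/* STNSM (opcode 0xac) ANDs the immediate into the system mask;
   STOSM (0xad) ORs it in. */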
4496 if (s->fields->op == 0xac) {
4497 tcg_gen_andi_i64(psw_mask, psw_mask,
4498 (i2 << 56) | 0x00ffffffffffffffull);
4499 } else {
4500 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4503 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4504 return DISAS_PC_STALE_NOCHAIN;
4507 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4509 gen_helper_stura(cpu_env, o->in2, o->in1);
4510 return DISAS_NEXT;
4513 static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4515 gen_helper_sturg(cpu_env, o->in2, o->in1);
4516 return DISAS_NEXT;
4518 #endif
4520 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4522 gen_helper_stfle(cc_op, cpu_env, o->in2);
4523 set_cc_static(s);
4524 return DISAS_NEXT;
4527 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4529 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4530 return DISAS_NEXT;
4533 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4535 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4536 return DISAS_NEXT;
4539 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4541 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4542 return DISAS_NEXT;
4545 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4547 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4548 return DISAS_NEXT;
4551 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4553 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4554 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4555 gen_helper_stam(cpu_env, r1, o->in2, r3);
4556 tcg_temp_free_i32(r1);
4557 tcg_temp_free_i32(r3);
4558 return DISAS_NEXT;
4561 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4563 int m3 = get_field(s->fields, m3);
4564 int pos, base = s->insn->data;
4565 TCGv_i64 tmp = tcg_temp_new_i64();
4567 pos = base + ctz32(m3) * 8;
4568 switch (m3) {
4569 case 0xf:
4570 /* Effectively a 32-bit store. */
4571 tcg_gen_shri_i64(tmp, o->in1, pos);
4572 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4573 break;
4575 case 0xc:
4576 case 0x6:
4577 case 0x3:
4578 /* Effectively a 16-bit store. */
4579 tcg_gen_shri_i64(tmp, o->in1, pos);
4580 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4581 break;
4583 case 0x8:
4584 case 0x4:
4585 case 0x2:
4586 case 0x1:
4587 /* Effectively an 8-bit store. */
4588 tcg_gen_shri_i64(tmp, o->in1, pos);
4589 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4590 break;
4592 default:
4593 /* This is going to be a sequence of shifts and stores. */
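/* E.g. assuming a base of 0 (as for STCM on the low word), m3 = 0xa
   stores the byte at bits 31-24 and then the byte at bits 15-8,
   at consecutive addresses. */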
4594 pos = base + 32 - 8;
4595 while (m3) {
4596 if (m3 & 0x8) {
4597 tcg_gen_shri_i64(tmp, o->in1, pos);
4598 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4599 tcg_gen_addi_i64(o->in2, o->in2, 1);
4601 m3 = (m3 << 1) & 0xf;
4602 pos -= 8;
4604 break;
4606 tcg_temp_free_i64(tmp);
4607 return DISAS_NEXT;
4610 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4612 int r1 = get_field(s->fields, r1);
4613 int r3 = get_field(s->fields, r3);
4614 int size = s->insn->data;
4615 TCGv_i64 tsize = tcg_const_i64(size);
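/* Store regs r1 through r3, wrapping modulo 16; e.g. an
   "stmg %r14,%r2,..." stores r14, r15, r0, r1, r2. */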
4617 while (1) {
4618 if (size == 8) {
4619 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4620 } else {
4621 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4623 if (r1 == r3) {
4624 break;
4626 tcg_gen_add_i64(o->in2, o->in2, tsize);
4627 r1 = (r1 + 1) & 15;
4630 tcg_temp_free_i64(tsize);
4631 return DISAS_NEXT;
4634 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4636 int r1 = get_field(s->fields, r1);
4637 int r3 = get_field(s->fields, r3);
4638 TCGv_i64 t = tcg_temp_new_i64();
4639 TCGv_i64 t4 = tcg_const_i64(4);
4640 TCGv_i64 t32 = tcg_const_i64(32);
4642 while (1) {
4643 tcg_gen_shl_i64(t, regs[r1], t32);
4644 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4645 if (r1 == r3) {
4646 break;
4648 tcg_gen_add_i64(o->in2, o->in2, t4);
4649 r1 = (r1 + 1) & 15;
4652 tcg_temp_free_i64(t);
4653 tcg_temp_free_i64(t4);
4654 tcg_temp_free_i64(t32);
4655 return DISAS_NEXT;
4658 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4660 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4661 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4662 } else if (HAVE_ATOMIC128) {
4663 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4664 } else {
4665 gen_helper_exit_atomic(cpu_env);
4666 return DISAS_NORETURN;
4668 return DISAS_NEXT;
4671 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4673 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4674 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4676 gen_helper_srst(cpu_env, r1, r2);
4678 tcg_temp_free_i32(r1);
4679 tcg_temp_free_i32(r2);
4680 set_cc_static(s);
4681 return DISAS_NEXT;
4684 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4686 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4687 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4689 gen_helper_srstu(cpu_env, r1, r2);
4691 tcg_temp_free_i32(r1);
4692 tcg_temp_free_i32(r2);
4693 set_cc_static(s);
4694 return DISAS_NEXT;
4697 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4699 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4700 return DISAS_NEXT;
4703 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4705 DisasCompare cmp;
4706 TCGv_i64 borrow;
4708 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4710 /* The !borrow flag is the msb of CC. Since we want the inverse of
4711 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
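/* That is, "borrow" below is 1 exactly when CC is 0 or 1, i.e. when
   the previous subtraction produced a borrow. */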
4712 disas_jcc(s, &cmp, 8 | 4);
4713 borrow = tcg_temp_new_i64();
4714 if (cmp.is_64) {
4715 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4716 } else {
4717 TCGv_i32 t = tcg_temp_new_i32();
4718 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4719 tcg_gen_extu_i32_i64(borrow, t);
4720 tcg_temp_free_i32(t);
4722 free_compare(&cmp);
4724 tcg_gen_sub_i64(o->out, o->out, borrow);
4725 tcg_temp_free_i64(borrow);
4726 return DISAS_NEXT;
4729 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4731 TCGv_i32 t;
4733 update_psw_addr(s);
4734 update_cc_op(s);
4736 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4737 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4738 tcg_temp_free_i32(t);
4740 t = tcg_const_i32(s->ilen);
4741 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4742 tcg_temp_free_i32(t);
4744 gen_exception(EXCP_SVC);
4745 return DISAS_NORETURN;
4748 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4750 int cc = 0;
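/* TEST ADDRESSING MODE: cc 0 = 24-bit, cc 1 = 31-bit, cc 3 = 64-bit;
   cc 2 cannot occur, since the 64-bit flag is only ever set together
   with the 31-bit flag. */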
4752 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4753 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4754 gen_op_movi_cc(s, cc);
4755 return DISAS_NEXT;
4758 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4760 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4761 set_cc_static(s);
4762 return DISAS_NEXT;
4765 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4767 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4768 set_cc_static(s);
4769 return DISAS_NEXT;
4772 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4774 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4775 set_cc_static(s);
4776 return DISAS_NEXT;
4779 #ifndef CONFIG_USER_ONLY
4781 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4783 gen_helper_testblock(cc_op, cpu_env, o->in2);
4784 set_cc_static(s);
4785 return DISAS_NEXT;
4788 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4790 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4791 set_cc_static(s);
4792 return DISAS_NEXT;
4795 #endif
4797 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4799 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4800 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4801 tcg_temp_free_i32(l1);
4802 set_cc_static(s);
4803 return DISAS_NEXT;
4806 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4808 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4809 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4810 tcg_temp_free_i32(l);
4811 set_cc_static(s);
4812 return DISAS_NEXT;
4815 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4817 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4818 return_low128(o->out2);
4819 set_cc_static(s);
4820 return DISAS_NEXT;
4823 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4825 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4826 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4827 tcg_temp_free_i32(l);
4828 set_cc_static(s);
4829 return DISAS_NEXT;
4832 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4834 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4835 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4836 tcg_temp_free_i32(l);
4837 set_cc_static(s);
4838 return DISAS_NEXT;
4841 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4843 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4844 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4845 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4846 TCGv_i32 tst = tcg_temp_new_i32();
4847 int m3 = get_field(s->fields, m3);
4849 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4850 m3 = 0;
4852 if (m3 & 1) {
4853 tcg_gen_movi_i32(tst, -1);
4854 } else {
4855 tcg_gen_extrl_i64_i32(tst, regs[0]);
4856 if (s->insn->opc & 3) {
4857 tcg_gen_ext8u_i32(tst, tst);
4858 } else {
4859 tcg_gen_ext16u_i32(tst, tst);
4862 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4864 tcg_temp_free_i32(r1);
4865 tcg_temp_free_i32(r2);
4866 tcg_temp_free_i32(sizes);
4867 tcg_temp_free_i32(tst);
4868 set_cc_static(s);
4869 return DISAS_NEXT;
4872 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4874 TCGv_i32 t1 = tcg_const_i32(0xff);
4875 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4876 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4877 tcg_temp_free_i32(t1);
4878 set_cc_static(s);
4879 return DISAS_NEXT;
4882 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4884 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4885 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4886 tcg_temp_free_i32(l);
4887 return DISAS_NEXT;
4890 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4892 int l1 = get_field(s->fields, l1) + 1;
4893 TCGv_i32 l;
4895 /* The length must not exceed 32 bytes. */
4896 if (l1 > 32) {
4897 gen_program_exception(s, PGM_SPECIFICATION);
4898 return DISAS_NORETURN;
4900 l = tcg_const_i32(l1);
4901 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4902 tcg_temp_free_i32(l);
4903 set_cc_static(s);
4904 return DISAS_NEXT;
4907 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4909 int l1 = get_field(s->fields, l1) + 1;
4910 TCGv_i32 l;
4912 /* The length must be even and must not exceed 64 bytes. */
4913 if ((l1 & 1) || (l1 > 64)) {
4914 gen_program_exception(s, PGM_SPECIFICATION);
4915 return DISAS_NORETURN;
4917 l = tcg_const_i32(l1);
4918 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4919 tcg_temp_free_i32(l);
4920 set_cc_static(s);
4921 return DISAS_NEXT;
4925 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4927 int d1 = get_field(s->fields, d1);
4928 int d2 = get_field(s->fields, d2);
4929 int b1 = get_field(s->fields, b1);
4930 int b2 = get_field(s->fields, b2);
4931 int l = get_field(s->fields, l1);
4932 TCGv_i32 t32;
4934 o->addr1 = get_address(s, 0, b1, d1);
4936 /* If the addresses are identical, this is a store/memset of zero. */
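/* E.g. "xc 0(8,%r15),0(%r15)" is the classic idiom for zeroing
   eight bytes; lengths up to 32 are inlined as plain stores. */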
4937 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4938 o->in2 = tcg_const_i64(0);
4940 l++;
4941 while (l >= 8) {
4942 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4943 l -= 8;
4944 if (l > 0) {
4945 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4948 if (l >= 4) {
4949 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4950 l -= 4;
4951 if (l > 0) {
4952 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4955 if (l >= 2) {
4956 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4957 l -= 2;
4958 if (l > 0) {
4959 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4962 if (l) {
4963 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4965 gen_op_movi_cc(s, 0);
4966 return DISAS_NEXT;
4969 /* But in general we'll defer to a helper. */
4970 o->in2 = get_address(s, 0, b2, d2);
4971 t32 = tcg_const_i32(l);
4972 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4973 tcg_temp_free_i32(t32);
4974 set_cc_static(s);
4975 return DISAS_NEXT;
4978 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4980 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4981 return DISAS_NEXT;
4984 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4986 int shift = s->insn->data & 0xff;
4987 int size = s->insn->data >> 8;
4988 uint64_t mask = ((1ull << size) - 1) << shift;
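/* E.g. a 32-bit immediate applied to the high word has shift = 32
   and size = 32, giving mask = 0xffffffff00000000ull. */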
4990 assert(!o->g_in2);
4991 tcg_gen_shli_i64(o->in2, o->in2, shift);
4992 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4994 /* Produce the CC from only the bits manipulated. */
4995 tcg_gen_andi_i64(cc_dst, o->out, mask);
4996 set_cc_nz_u64(s, cc_dst);
4997 return DISAS_NEXT;
5000 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5002 o->in1 = tcg_temp_new_i64();
5004 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5005 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5006 } else {
5007 /* Perform the atomic operation in memory. */
5008 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5009 s->insn->data);
5012 /* Recompute the XOR also in the atomic case; it is needed for setting the CC. */
5013 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5015 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5016 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5018 return DISAS_NEXT;
5021 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5023 o->out = tcg_const_i64(0);
5024 return DISAS_NEXT;
5027 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5029 o->out = tcg_const_i64(0);
5030 o->out2 = o->out;
5031 o->g_out2 = true;
5032 return DISAS_NEXT;
5035 #ifndef CONFIG_USER_ONLY
5036 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5038 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5040 gen_helper_clp(cpu_env, r2);
5041 tcg_temp_free_i32(r2);
5042 set_cc_static(s);
5043 return DISAS_NEXT;
5046 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5048 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5049 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5051 gen_helper_pcilg(cpu_env, r1, r2);
5052 tcg_temp_free_i32(r1);
5053 tcg_temp_free_i32(r2);
5054 set_cc_static(s);
5055 return DISAS_NEXT;
5058 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5060 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5061 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5063 gen_helper_pcistg(cpu_env, r1, r2);
5064 tcg_temp_free_i32(r1);
5065 tcg_temp_free_i32(r2);
5066 set_cc_static(s);
5067 return DISAS_NEXT;
5070 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5072 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5073 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5075 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5076 tcg_temp_free_i32(ar);
5077 tcg_temp_free_i32(r1);
5078 set_cc_static(s);
5079 return DISAS_NEXT;
5082 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5084 gen_helper_sic(cpu_env, o->in1, o->in2);
5085 return DISAS_NEXT;
5088 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5090 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5091 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5093 gen_helper_rpcit(cpu_env, r1, r2);
5094 tcg_temp_free_i32(r1);
5095 tcg_temp_free_i32(r2);
5096 set_cc_static(s);
5097 return DISAS_NEXT;
5100 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5102 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5103 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
5104 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5106 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5107 tcg_temp_free_i32(ar);
5108 tcg_temp_free_i32(r1);
5109 tcg_temp_free_i32(r3);
5110 set_cc_static(s);
5111 return DISAS_NEXT;
5114 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5116 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5117 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5119 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5120 tcg_temp_free_i32(ar);
5121 tcg_temp_free_i32(r1);
5122 set_cc_static(s);
5123 return DISAS_NEXT;
5125 #endif
5127 #include "translate_vx.inc.c"
5129 /* ====================================================================== */
5130 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5131 the original inputs), update the various cc data structures in order to
5132 be able to compute the new condition code. */
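/* E.g. cout_adds64 below only records CC_OP_ADD_64 together with the
   inputs and the result; the condition code itself is computed
   lazily, when (and if) a later instruction actually consumes it. */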
5134 static void cout_abs32(DisasContext *s, DisasOps *o)
5136 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5139 static void cout_abs64(DisasContext *s, DisasOps *o)
5141 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5144 static void cout_adds32(DisasContext *s, DisasOps *o)
5146 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5149 static void cout_adds64(DisasContext *s, DisasOps *o)
5151 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5154 static void cout_addu32(DisasContext *s, DisasOps *o)
5156 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
5159 static void cout_addu64(DisasContext *s, DisasOps *o)
5161 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
5164 static void cout_addc32(DisasContext *s, DisasOps *o)
5166 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
5169 static void cout_addc64(DisasContext *s, DisasOps *o)
5171 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5174 static void cout_cmps32(DisasContext *s, DisasOps *o)
5176 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5179 static void cout_cmps64(DisasContext *s, DisasOps *o)
5181 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5184 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5186 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5189 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5191 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5194 static void cout_f32(DisasContext *s, DisasOps *o)
5196 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5199 static void cout_f64(DisasContext *s, DisasOps *o)
5201 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5204 static void cout_f128(DisasContext *s, DisasOps *o)
5206 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5209 static void cout_nabs32(DisasContext *s, DisasOps *o)
5211 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5214 static void cout_nabs64(DisasContext *s, DisasOps *o)
5216 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5219 static void cout_neg32(DisasContext *s, DisasOps *o)
5221 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5224 static void cout_neg64(DisasContext *s, DisasOps *o)
5226 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5229 static void cout_nz32(DisasContext *s, DisasOps *o)
5231 tcg_gen_ext32u_i64(cc_dst, o->out);
5232 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5235 static void cout_nz64(DisasContext *s, DisasOps *o)
5237 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5240 static void cout_s32(DisasContext *s, DisasOps *o)
5242 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5245 static void cout_s64(DisasContext *s, DisasOps *o)
5247 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5250 static void cout_subs32(DisasContext *s, DisasOps *o)
5252 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5255 static void cout_subs64(DisasContext *s, DisasOps *o)
5257 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5260 static void cout_subu32(DisasContext *s, DisasOps *o)
5262 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5265 static void cout_subu64(DisasContext *s, DisasOps *o)
5267 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5270 static void cout_subb32(DisasContext *s, DisasOps *o)
5272 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5275 static void cout_subb64(DisasContext *s, DisasOps *o)
5277 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5280 static void cout_tm32(DisasContext *s, DisasOps *o)
5282 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5285 static void cout_tm64(DisasContext *s, DisasOps *o)
5287 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5290 /* ====================================================================== */
5291 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5292 with the TCG register to which we will write. Used in combination with
5293 the "wout" generators, in some cases we need a new temporary, and in
5294 some cases we can write to a TCG global. */
5296 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5298 o->out = tcg_temp_new_i64();
5300 #define SPEC_prep_new 0
5302 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5304 o->out = tcg_temp_new_i64();
5305 o->out2 = tcg_temp_new_i64();
5307 #define SPEC_prep_new_P 0
5309 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5311 o->out = regs[get_field(f, r1)];
5312 o->g_out = true;
5314 #define SPEC_prep_r1 0
5316 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5318 int r1 = get_field(f, r1);
5319 o->out = regs[r1];
5320 o->out2 = regs[r1 + 1];
5321 o->g_out = o->g_out2 = true;
5323 #define SPEC_prep_r1_P SPEC_r1_even
5325 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5326 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5328 o->out = load_freg(get_field(f, r1));
5329 o->out2 = load_freg(get_field(f, r1) + 2);
5331 #define SPEC_prep_x1 SPEC_r1_f128
5333 /* ====================================================================== */
5334 /* The "Write OUTput" generators. These generally perform some non-trivial
5335 copy of data to TCG globals, or to main memory. The trivial cases are
5336 generally handled by having a "prep" generator install the TCG global
5337 as the destination of the operation. */
5339 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5341 store_reg(get_field(f, r1), o->out);
5343 #define SPEC_wout_r1 0
5345 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5347 int r1 = get_field(f, r1);
5348 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5350 #define SPEC_wout_r1_8 0
5352 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5354 int r1 = get_field(f, r1);
5355 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5357 #define SPEC_wout_r1_16 0
5359 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5361 store_reg32_i64(get_field(f, r1), o->out);
5363 #define SPEC_wout_r1_32 0
5365 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5367 store_reg32h_i64(get_field(f, r1), o->out);
5369 #define SPEC_wout_r1_32h 0
5371 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5373 int r1 = get_field(f, r1);
5374 store_reg32_i64(r1, o->out);
5375 store_reg32_i64(r1 + 1, o->out2);
5377 #define SPEC_wout_r1_P32 SPEC_r1_even
5379 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5381 int r1 = get_field(f, r1);
5382 store_reg32_i64(r1 + 1, o->out);
5383 tcg_gen_shri_i64(o->out, o->out, 32);
5384 store_reg32_i64(r1, o->out);
5386 #define SPEC_wout_r1_D32 SPEC_r1_even
5388 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5390 int r3 = get_field(f, r3);
5391 store_reg32_i64(r3, o->out);
5392 store_reg32_i64(r3 + 1, o->out2);
5394 #define SPEC_wout_r3_P32 SPEC_r3_even
5396 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5398 int r3 = get_field(f, r3);
5399 store_reg(r3, o->out);
5400 store_reg(r3 + 1, o->out2);
5402 #define SPEC_wout_r3_P64 SPEC_r3_even
5404 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5406 store_freg32_i64(get_field(f, r1), o->out);
5408 #define SPEC_wout_e1 0
5410 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5412 store_freg(get_field(f, r1), o->out);
5414 #define SPEC_wout_f1 0
5416 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5418 int f1 = get_field(s->fields, r1);
5419 store_freg(f1, o->out);
5420 store_freg(f1 + 2, o->out2);
5422 #define SPEC_wout_x1 SPEC_r1_f128
5424 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5426 if (get_field(f, r1) != get_field(f, r2)) {
5427 store_reg32_i64(get_field(f, r1), o->out);
5430 #define SPEC_wout_cond_r1r2_32 0
5432 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5434 if (get_field(f, r1) != get_field(f, r2)) {
5435 store_freg32_i64(get_field(f, r1), o->out);
5438 #define SPEC_wout_cond_e1e2 0
5440 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5442 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5444 #define SPEC_wout_m1_8 0
5446 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5448 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5450 #define SPEC_wout_m1_16 0
5452 #ifndef CONFIG_USER_ONLY
5453 static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
5455 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5457 #define SPEC_wout_m1_16a 0
5458 #endif
5460 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5462 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5464 #define SPEC_wout_m1_32 0
5466 #ifndef CONFIG_USER_ONLY
5467 static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
5469 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5471 #define SPEC_wout_m1_32a 0
5472 #endif
5474 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5476 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5478 #define SPEC_wout_m1_64 0
5480 #ifndef CONFIG_USER_ONLY
5481 static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5483 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5485 #define SPEC_wout_m1_64a 0
5486 #endif
5488 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5490 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5492 #define SPEC_wout_m2_32 0
5494 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5496 store_reg(get_field(f, r1), o->in2);
5498 #define SPEC_wout_in2_r1 0
5500 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5502 store_reg32_i64(get_field(f, r1), o->in2);
5504 #define SPEC_wout_in2_r1_32 0
5506 /* ====================================================================== */
5507 /* The "INput 1" generators. These load the first operand to an insn. */
5509 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5511 o->in1 = load_reg(get_field(f, r1));
5513 #define SPEC_in1_r1 0
5515 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5517 o->in1 = regs[get_field(f, r1)];
5518 o->g_in1 = true;
5520 #define SPEC_in1_r1_o 0
5522 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5524 o->in1 = tcg_temp_new_i64();
5525 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5527 #define SPEC_in1_r1_32s 0
5529 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5531 o->in1 = tcg_temp_new_i64();
5532 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5534 #define SPEC_in1_r1_32u 0
5536 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5538 o->in1 = tcg_temp_new_i64();
5539 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5541 #define SPEC_in1_r1_sr32 0
5543 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5545 o->in1 = load_reg(get_field(f, r1) + 1);
5547 #define SPEC_in1_r1p1 SPEC_r1_even
5549 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5551 o->in1 = tcg_temp_new_i64();
5552 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5554 #define SPEC_in1_r1p1_32s SPEC_r1_even
5556 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5558 o->in1 = tcg_temp_new_i64();
5559 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5561 #define SPEC_in1_r1p1_32u SPEC_r1_even
5563 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5565 int r1 = get_field(f, r1);
5566 o->in1 = tcg_temp_new_i64();
5567 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5569 #define SPEC_in1_r1_D32 SPEC_r1_even
5571 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5573 o->in1 = load_reg(get_field(f, r2));
5575 #define SPEC_in1_r2 0
5577 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5579 o->in1 = tcg_temp_new_i64();
5580 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5582 #define SPEC_in1_r2_sr32 0
5584 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5586 o->in1 = load_reg(get_field(f, r3));
5588 #define SPEC_in1_r3 0
5590 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5592 o->in1 = regs[get_field(f, r3)];
5593 o->g_in1 = true;
5595 #define SPEC_in1_r3_o 0
5597 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5599 o->in1 = tcg_temp_new_i64();
5600 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5602 #define SPEC_in1_r3_32s 0
5604 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5606 o->in1 = tcg_temp_new_i64();
5607 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5609 #define SPEC_in1_r3_32u 0
5611 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5613 int r3 = get_field(f, r3);
5614 o->in1 = tcg_temp_new_i64();
5615 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5617 #define SPEC_in1_r3_D32 SPEC_r3_even
5619 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5621 o->in1 = load_freg32_i64(get_field(f, r1));
5623 #define SPEC_in1_e1 0
5625 static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5627 o->in1 = load_freg(get_field(f, r1));
5629 #define SPEC_in1_f1 0
5631 /* Load the high double word of an extended (128-bit) format FP number */
5632 static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
5634 o->in1 = load_freg(get_field(f, r2));
5636 #define SPEC_in1_x2h SPEC_r2_f128
5638 static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
5640 o->in1 = load_freg(get_field(f, r3));
5642 #define SPEC_in1_f3 0
5644 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5646 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5648 #define SPEC_in1_la1 0
5650 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5652 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5653 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5655 #define SPEC_in1_la2 0
5657 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5659 in1_la1(s, f, o);
5660 o->in1 = tcg_temp_new_i64();
5661 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5663 #define SPEC_in1_m1_8u 0
5665 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5667 in1_la1(s, f, o);
5668 o->in1 = tcg_temp_new_i64();
5669 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5671 #define SPEC_in1_m1_16s 0
5673 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5675 in1_la1(s, f, o);
5676 o->in1 = tcg_temp_new_i64();
5677 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5679 #define SPEC_in1_m1_16u 0
5681 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5683 in1_la1(s, f, o);
5684 o->in1 = tcg_temp_new_i64();
5685 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5687 #define SPEC_in1_m1_32s 0
5689 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5691 in1_la1(s, f, o);
5692 o->in1 = tcg_temp_new_i64();
5693 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5695 #define SPEC_in1_m1_32u 0
5697 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5699 in1_la1(s, f, o);
5700 o->in1 = tcg_temp_new_i64();
5701 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5703 #define SPEC_in1_m1_64 0
5705 /* ====================================================================== */
5706 /* The "INput 2" generators. These load the second operand to an insn. */
5708 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5710 o->in2 = regs[get_field(f, r1)];
5711 o->g_in2 = true;
5713 #define SPEC_in2_r1_o 0
5715 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5717 o->in2 = tcg_temp_new_i64();
5718 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5720 #define SPEC_in2_r1_16u 0
5722 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5724 o->in2 = tcg_temp_new_i64();
5725 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5727 #define SPEC_in2_r1_32u 0
5729 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5731 int r1 = get_field(f, r1);
5732 o->in2 = tcg_temp_new_i64();
5733 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5735 #define SPEC_in2_r1_D32 SPEC_r1_even
5737 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5739 o->in2 = load_reg(get_field(f, r2));
5741 #define SPEC_in2_r2 0
5743 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5745 o->in2 = regs[get_field(f, r2)];
5746 o->g_in2 = true;
5748 #define SPEC_in2_r2_o 0
5750 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5752 int r2 = get_field(f, r2);
5753 if (r2 != 0) {
5754 o->in2 = load_reg(r2);
5757 #define SPEC_in2_r2_nz 0
5759 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5761 o->in2 = tcg_temp_new_i64();
5762 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5764 #define SPEC_in2_r2_8s 0
5766 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5768 o->in2 = tcg_temp_new_i64();
5769 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5771 #define SPEC_in2_r2_8u 0
5773 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5775 o->in2 = tcg_temp_new_i64();
5776 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5778 #define SPEC_in2_r2_16s 0
5780 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5782 o->in2 = tcg_temp_new_i64();
5783 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5785 #define SPEC_in2_r2_16u 0
5787 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5789 o->in2 = load_reg(get_field(f, r3));
5791 #define SPEC_in2_r3 0
5793 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5795 o->in2 = tcg_temp_new_i64();
5796 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5798 #define SPEC_in2_r3_sr32 0
5800 static void in2_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5802 o->in2 = tcg_temp_new_i64();
5803 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r3)]);
5805 #define SPEC_in2_r3_32u 0
5807 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5809 o->in2 = tcg_temp_new_i64();
5810 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5812 #define SPEC_in2_r2_32s 0
5814 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5816 o->in2 = tcg_temp_new_i64();
5817 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5819 #define SPEC_in2_r2_32u 0
5821 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5823 o->in2 = tcg_temp_new_i64();
5824 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5826 #define SPEC_in2_r2_sr32 0
5828 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5830 o->in2 = load_freg32_i64(get_field(f, r2));
5832 #define SPEC_in2_e2 0
5834 static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
5836 o->in2 = load_freg(get_field(f, r2));
5838 #define SPEC_in2_f2 0
5840 /* Load the low double word of an extended (128-bit) format FP number */
5841 static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
5843 o->in2 = load_freg(get_field(f, r2) + 2);
5845 #define SPEC_in2_x2l SPEC_r2_f128
5847 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5849 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5851 #define SPEC_in2_ra2 0
5853 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5855 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5856 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5858 #define SPEC_in2_a2 0
5860 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5862 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
5864 #define SPEC_in2_ri2 0
5866 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5868 help_l2_shift(s, f, o, 31);
5870 #define SPEC_in2_sh32 0
5872 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5874 help_l2_shift(s, f, o, 63);
5876 #define SPEC_in2_sh64 0
5878 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5880 in2_a2(s, f, o);
5881 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5883 #define SPEC_in2_m2_8u 0
5885 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5887 in2_a2(s, f, o);
5888 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5890 #define SPEC_in2_m2_16s 0
5892 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5894 in2_a2(s, f, o);
5895 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5897 #define SPEC_in2_m2_16u 0
5899 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5901 in2_a2(s, f, o);
5902 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5904 #define SPEC_in2_m2_32s 0
5906 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5908 in2_a2(s, f, o);
5909 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5911 #define SPEC_in2_m2_32u 0
5913 #ifndef CONFIG_USER_ONLY
5914 static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
5916 in2_a2(s, f, o);
5917 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5919 #define SPEC_in2_m2_32ua 0
5920 #endif
5922 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5924 in2_a2(s, f, o);
5925 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5927 #define SPEC_in2_m2_64 0
5929 #ifndef CONFIG_USER_ONLY
5930 static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5932 in2_a2(s, f, o);
5933 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5935 #define SPEC_in2_m2_64a 0
5936 #endif
5938 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5940 in2_ri2(s, f, o);
5941 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5943 #define SPEC_in2_mri2_16u 0
5945 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5947 in2_ri2(s, f, o);
5948 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5950 #define SPEC_in2_mri2_32s 0
5952 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5954 in2_ri2(s, f, o);
5955 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5957 #define SPEC_in2_mri2_32u 0
5959 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5961 in2_ri2(s, f, o);
5962 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5964 #define SPEC_in2_mri2_64 0
5966 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5968 o->in2 = tcg_const_i64(get_field(f, i2));
5970 #define SPEC_in2_i2 0
5972 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5974 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5976 #define SPEC_in2_i2_8u 0
5978 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5980 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5982 #define SPEC_in2_i2_16u 0
5984 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5986 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5988 #define SPEC_in2_i2_32u 0
5990 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5992 uint64_t i2 = (uint16_t)get_field(f, i2);
5993 o->in2 = tcg_const_i64(i2 << s->insn->data);
5995 #define SPEC_in2_i2_16u_shl 0
5997 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5999 uint64_t i2 = (uint32_t)get_field(f, i2);
6000 o->in2 = tcg_const_i64(i2 << s->insn->data);
6002 #define SPEC_in2_i2_32u_shl 0
6004 #ifndef CONFIG_USER_ONLY
6005 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
6007 o->in2 = tcg_const_i64(s->fields->raw_insn);
6009 #define SPEC_in2_insn 0
6010 #endif
6012 /* ====================================================================== */
6014 /* Find opc within the table of insns. This is formulated as a switch
6015 statement so that (1) we get compile-time notice of cut-paste errors
6016 for duplicated opcodes, and (2) the compiler generates the binary
6017 search tree, rather than us having to post-process the table. */
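/* Schematically, an insn-data.def entry such as
     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
   expands once into an enum member (insn_AR) and once into a
   DisasInsn initializer wiring up in1_r1, in2_r2, prep_new,
   wout_r1_32, op_add and cout_adds32, depending on how E is defined
   at each #include of "insn-data.def" below.  (Entry shown for
   illustration; see insn-data.def for the actual table.) */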
6019 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6020 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6022 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6023 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6025 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6026 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6028 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6030 enum DisasInsnEnum {
6031 #include "insn-data.def"
6034 #undef E
6035 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6036 .opc = OPC, \
6037 .flags = FL, \
6038 .fmt = FMT_##FT, \
6039 .fac = FAC_##FC, \
6040 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6041 .name = #NM, \
6042 .help_in1 = in1_##I1, \
6043 .help_in2 = in2_##I2, \
6044 .help_prep = prep_##P, \
6045 .help_wout = wout_##W, \
6046 .help_cout = cout_##CC, \
6047 .help_op = op_##OP, \
6048 .data = D \
6051 /* Allow 0 to be used for NULL in the table below. */
6052 #define in1_0 NULL
6053 #define in2_0 NULL
6054 #define prep_0 NULL
6055 #define wout_0 NULL
6056 #define cout_0 NULL
6057 #define op_0 NULL
6059 #define SPEC_in1_0 0
6060 #define SPEC_in2_0 0
6061 #define SPEC_prep_0 0
6062 #define SPEC_wout_0 0
6064 /* Give smaller names to the various facilities. */
6065 #define FAC_Z S390_FEAT_ZARCH
6066 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6067 #define FAC_DFP S390_FEAT_DFP
6068 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6069 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6070 #define FAC_EE S390_FEAT_EXECUTE_EXT
6071 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6072 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6073 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6074 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6075 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6076 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6077 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6078 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6079 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6080 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6081 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6082 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6083 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6084 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6085 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6086 #define FAC_SFLE S390_FEAT_STFLE
6087 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6088 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6089 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6090 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6091 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6092 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6093 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6094 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6095 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6096 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6097 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6098 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6099 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6100 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6101 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6102 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6103 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6104 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6106 static const DisasInsn insn_info[] = {
6107 #include "insn-data.def"
6110 #undef E
6111 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6112 case OPC: return &insn_info[insn_ ## NM];
6114 static const DisasInsn *lookup_opc(uint16_t opc)
6116 switch (opc) {
6117 #include "insn-data.def"
6118 default:
6119 return NULL;
6123 #undef F
6124 #undef E
6125 #undef D
6126 #undef C
6128 /* Extract a field from the insn. The INSN should be left-aligned in
6129 the uint64_t so that we can more easily utilize the big-bit-endian
6130 definitions we extract from the Principles of Operation. */
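/* E.g. the r1 field of an RR-format insn has beg = 8 and size = 4,
   so the extraction below is simply (insn << 8) >> 60. */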
6132 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6134 uint32_t r, m;
6136 if (f->size == 0) {
6137 return;
6140 /* Zero extract the field from the insn. */
6141 r = (insn << f->beg) >> (64 - f->size);
6143 /* Sign-extend, or un-swap the field as necessary. */
6144 switch (f->type) {
6145 case 0: /* unsigned */
6146 break;
6147 case 1: /* signed */
6148 assert(f->size <= 32);
6149 m = 1u << (f->size - 1);
6150 r = (r ^ m) - m;
6151 break;
6152 case 2: /* dl+dh split, signed 20 bit. */
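/* r arrives here as (DL << 8) | DH; reassemble the signed 20-bit
   displacement as sign_extend(DH):DL. */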
6153 r = ((int8_t)r << 12) | (r >> 8);
6154 break;
6155 case 3: /* MSB stored in RXB */
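/* Vector insns carry bit 4 of each register field in the RXB byte
   (big-endian bits 36-39); pick the RXB bit that corresponds to
   this field's position. */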
6156 g_assert(f->size == 4);
6157 switch (f->beg) {
6158 case 8:
6159 r |= extract64(insn, 63 - 36, 1) << 4;
6160 break;
6161 case 12:
6162 r |= extract64(insn, 63 - 37, 1) << 4;
6163 break;
6164 case 16:
6165 r |= extract64(insn, 63 - 38, 1) << 4;
6166 break;
6167 case 32:
6168 r |= extract64(insn, 63 - 39, 1) << 4;
6169 break;
6170 default:
6171 g_assert_not_reached();
6173 break;
6174 default:
6175 abort();
6178 /* Validate that the "compressed" encoding we selected above is valid.
6179 I.e. we haven't made two different original fields overlap. */
6180 assert(((o->presentC >> f->indexC) & 1) == 0);
6181 o->presentC |= 1 << f->indexC;
6182 o->presentO |= 1 << f->indexO;
6184 o->c[f->indexC] = r;
6187 /* Lookup the insn at the current PC, extracting the operands into O and
6188 returning the info struct for the insn. Returns NULL for invalid insn. */
6190 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
6191 DisasFields *f)
6193 uint64_t insn, pc = s->base.pc_next;
6194 int op, op2, ilen;
6195 const DisasInsn *info;
6197 if (unlikely(s->ex_value)) {
6198 /* Drop the EX data now, so that it's clear on exception paths. */
6199 TCGv_i64 zero = tcg_const_i64(0);
6200 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6201 tcg_temp_free_i64(zero);
6203 /* Extract the values saved by EXECUTE. */
6204 insn = s->ex_value & 0xffffffffffff0000ull;
6205 ilen = s->ex_value & 0xf;
6206 op = insn >> 56;
6207 } else {
6208 insn = ld_code2(env, pc);
6209 op = (insn >> 8) & 0xff;
6210 ilen = get_ilen(op);
6211 switch (ilen) {
6212 case 2:
6213 insn = insn << 48;
6214 break;
6215 case 4:
6216 insn = ld_code4(env, pc) << 32;
6217 break;
6218 case 6:
6219 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6220 break;
6221 default:
6222 g_assert_not_reached();
6225 s->pc_tmp = s->base.pc_next + ilen;
6226 s->ilen = ilen;
6228 /* We can't actually determine the insn format until we've looked up
6229 the full insn opcode, which we can't do without locating the
6230 secondary opcode. Assume by default that OP2 is at bit 40; for
6231 those smaller insns that don't actually have a secondary opcode
6232 this will correctly result in OP2 = 0. */
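/* E.g. STCK is encoded "b2 05 ...": op = 0xb2, op2 = 0x05, and we
   will look up opc 0xb205 below. */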
6233 switch (op) {
6234 case 0x01: /* E */
6235 case 0x80: /* S */
6236 case 0x82: /* S */
6237 case 0x93: /* S */
6238 case 0xb2: /* S, RRF, RRE, IE */
6239 case 0xb3: /* RRE, RRD, RRF */
6240 case 0xb9: /* RRE, RRF */
6241 case 0xe5: /* SSE, SIL */
6242 op2 = (insn << 8) >> 56;
6243 break;
6244 case 0xa5: /* RI */
6245 case 0xa7: /* RI */
6246 case 0xc0: /* RIL */
6247 case 0xc2: /* RIL */
6248 case 0xc4: /* RIL */
6249 case 0xc6: /* RIL */
6250 case 0xc8: /* SSF */
6251 case 0xcc: /* RIL */
6252 op2 = (insn << 12) >> 60;
6253 break;
6254 case 0xc5: /* MII */
6255 case 0xc7: /* SMI */
6256 case 0xd0 ... 0xdf: /* SS */
6257 case 0xe1: /* SS */
6258 case 0xe2: /* SS */
6259 case 0xe8: /* SS */
6260 case 0xe9: /* SS */
6261 case 0xea: /* SS */
6262 case 0xee ... 0xf3: /* SS */
6263 case 0xf8 ... 0xfd: /* SS */
6264 op2 = 0;
6265 break;
6266 default:
6267 op2 = (insn << 40) >> 56;
6268 break;
6271 memset(f, 0, sizeof(*f));
6272 f->raw_insn = insn;
6273 f->op = op;
6274 f->op2 = op2;
6276 /* Lookup the instruction. */
6277 info = lookup_opc(op << 8 | op2);
6279 /* If we found it, extract the operands. */
6280 if (info != NULL) {
6281 DisasFormat fmt = info->fmt;
6282 int i;
6284 for (i = 0; i < NUM_C_FIELD; ++i) {
6285 extract_field(f, &format_info[fmt].op[i], insn);
6288 return info;
6291 static bool is_afp_reg(int reg)
6293 return reg % 2 || reg > 6;
6296 static bool is_fp_pair(int reg)
6298 /* 0,1,4,5,8,9,12,13: to exclude the others, check that bit 1 of the register number is clear */
6299 return !(reg & 0x2);
6302 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6304 const DisasInsn *insn;
6305 DisasJumpType ret = DISAS_NEXT;
6306 DisasFields f;
6307 DisasOps o = {};
6309 /* Search for the insn in the table. */
6310 insn = extract_insn(env, s, &f);
6312 /* Emit insn_start now that we know the ILEN. */
6313 tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);
6315 /* Not found means unimplemented/illegal opcode. */
6316 if (insn == NULL) {
6317 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6318 f.op, f.op2);
6319 gen_illegal_opcode(s);
6320 return DISAS_NORETURN;
6323 #ifndef CONFIG_USER_ONLY
6324 if (s->base.tb->flags & FLAG_MASK_PER) {
6325 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6326 gen_helper_per_ifetch(cpu_env, addr);
6327 tcg_temp_free_i64(addr);
6329 #endif
6331 /* process flags */
6332 if (insn->flags) {
6333 /* privileged instruction */
6334 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6335 gen_program_exception(s, PGM_PRIVILEGED);
6336 return DISAS_NORETURN;
6339 /* if AFP is not enabled, instructions and registers are forbidden */
6340 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6341 uint8_t dxc = 0;
6343 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
6344 dxc = 1;
6346 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
6347 dxc = 1;
6349 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
6350 dxc = 1;
6352 if (insn->flags & IF_BFP) {
6353 dxc = 2;
6355 if (insn->flags & IF_DFP) {
6356 dxc = 3;
6358 if (insn->flags & IF_VEC) {
6359 dxc = 0xfe;
6361 if (dxc) {
6362 gen_data_exception(dxc);
6363 return DISAS_NORETURN;
6367 /* if vector instructions not enabled, executing them is forbidden */
6368 if (insn->flags & IF_VEC) {
6369 if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6370 gen_data_exception(0xfe);
6371 return DISAS_NORETURN;
6376 /* Check for insn specification exceptions. */
6377 if (insn->spec) {
6378 if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
6379 (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
6380 (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
6381 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
6382 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
6383 gen_program_exception(s, PGM_SPECIFICATION);
6384 return DISAS_NORETURN;
6388 /* Set up the structures we use to communicate with the helpers. */
6389 s->insn = insn;
6390 s->fields = &f;
6392 /* Implement the instruction. */
6393 if (insn->help_in1) {
6394 insn->help_in1(s, &f, &o);
6396 if (insn->help_in2) {
6397 insn->help_in2(s, &f, &o);
6399 if (insn->help_prep) {
6400 insn->help_prep(s, &f, &o);
6402 if (insn->help_op) {
6403 ret = insn->help_op(s, &o);
6405 if (ret != DISAS_NORETURN) {
6406 if (insn->help_wout) {
6407 insn->help_wout(s, &f, &o);
6409 if (insn->help_cout) {
6410 insn->help_cout(s, &o);
6414 /* Free any temporaries created by the helpers. */
6415 if (o.out && !o.g_out) {
6416 tcg_temp_free_i64(o.out);
6418 if (o.out2 && !o.g_out2) {
6419 tcg_temp_free_i64(o.out2);
6421 if (o.in1 && !o.g_in1) {
6422 tcg_temp_free_i64(o.in1);
6424 if (o.in2 && !o.g_in2) {
6425 tcg_temp_free_i64(o.in2);
6427 if (o.addr1) {
6428 tcg_temp_free_i64(o.addr1);

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
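        /*
         * The most-significant bit of the 32-bit address is the
         * addressing-mode flag, not part of the instruction address,
         * so strip it before translating.
         */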
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Emit an insn_start to accompany the breakpoint exception.
     * The ILEN value is a dummy, since this does not result in
     * an s390x exception, but an internal qemu exception which
     * brings us back to interact with the gdbstub.
     */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing. */
    dc->base.pc_next += 2;
    return true;
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
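    /*
     * Keep the TB within a single guest page, and translate the target
     * of EXECUTE (ex_value != 0) one instruction at a time.
     */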
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory. */
        qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
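    /*
     * data[] holds the values recorded by tcg_gen_insn_start() in
     * translate_one(): data[0] = psw.addr, data[1] = cc_op,
     * data[2] = ilen.
     */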
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date. */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];
}