target/s390x: Improve ADD LOGICAL WITH CARRY
target/s390x/translate.c
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
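
/*
 * Example of the compact overlap (a sketch derived from the table above):
 * FLD_C_r1, FLD_C_m1, FLD_C_b1, FLD_C_i1 and FLD_C_v1 all share slot 0,
 * which is safe because no single instruction format defines more than one
 * of them.  get_field(s, r1) then reads c[0], but only after have_field1()
 * has checked the FLD_O_r1 bit in the presentO availability bitmap.
 */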
struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
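
/*
 * Worked example (a sketch of the XOR correction above): on a little
 * endian host, element 1 of a word-sized access (es = MO_32, bytes = 4)
 * starts at offs = 1 * 4 = 4, and "offs ^= (8 - bytes)" turns that into
 * 4 ^ 4 = 0 - the host offset of that element within the byte-swapped
 * low 8-byte half, per the W row of the little endian table.
 */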
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
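
/*
 * A note on the "> 3" test (an assumption about the enum layout in
 * internal.h): CC_OP_CONST0..CC_OP_CONST3 are the lowest cc_op values
 * (0..3), and only the computed cc ops above that range keep live data
 * in cc_src/cc_dst/cc_vr that is worth discarding before an overwrite.
 */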
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
           (tb_cflags(s->base.tb) & CF_LAST_IO) ||
           (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
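
/*
 * A note on the page check above (based on the generic goto_tb contract
 * rather than anything s390x-specific): direct TB chaining skips the
 * lookup that would re-validate the guest mapping, so it is only safe
 * toward pages this TB has already been translated from.
 */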
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
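
/*
 * Reading the table: the index is the 4-bit branch mask, whose bits
 * select CC values 0..3 from left to right - bit 8 is CC0 (equal),
 * bit 4 is CC1 (low), bit 2 is CC2 (high) and bit 1 is CC3 (a don't
 * care here, hence the paired entries).  E.g. mask 8|4, "branch on
 * equal or low", maps to TCG_COND_LE.
 */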
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry */
            cond = TCG_COND_EQ;
            break;
        case 2 | 1: /* carry */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                         { { } },
#define F1(N, X1)                     { { X1 } },
#define F2(N, X1, X2)                 { { X1, X2 } },
#define F3(N, X1, X2, X3)             { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)         { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5)     { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
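
/*
 * Illustrative walk-through (a sketch - the per-insn wiring lives in the
 * insn-data.def table, which is not shown here): for a register-register
 * add, an "in1" helper would load r1 into o->in1, an "in2" helper r2 into
 * o->in2, op_add() below computes o->out, a "wout" helper writes o->out
 * back to r1, and a "cout" helper records the CC op for later evaluation.
 */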
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
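
/*
 * A note on the add2 above: tcg_gen_add2_i64(rl, rh, al, ah, bl, bh)
 * produces the 128-bit sum of {ah:al} and {bh:bl}.  With both high
 * parts pre-set to zero, the low half of the sum lands in o->out and
 * the carry-out (0 or 1) in cc_src, where the CC_OP_ADDU logic later
 * finds it.
 */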
/* Compute carry into cc_src.  */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0).  */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src.  */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
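
/*
 * Why "msb of CC": for ADD LOGICAL the architected condition codes are
 * 0 (zero, no carry), 1 (nonzero, no carry), 2 (zero, carry) and
 * 3 (nonzero, carry), so bit 1 of the two-bit CC is exactly the carry:
 * carry = cc >> 1.
 */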
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
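
/*
 * The two-step chain exists because add2 takes only two 128-bit addends:
 * the first add2 computes in1 + carry-in, the second adds in2 while
 * accumulating any carry-out in cc_src.  At most one of the two steps
 * can carry, so cc_src ends up 0 or 1, matching what CC_OP_ADDU expects.
 */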
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
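
/*
 * Facility note (an assumption about the feature encoding): STFLE bit 45
 * is QEMU's umbrella S390_FEAT_STFLE_45, which includes the
 * interlocked-access facility 1.  Without it, these add-to-storage insns
 * are not required to be atomic, so a plain load/add/store suffices.
 */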
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
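
/*
 * The deposit32(m3, 4, 4, m4) call packs both modifier fields into one
 * i32 for the helpers: m3 occupies bits 0-3 and m4 is deposited into
 * bits 4-7, so a helper can unpack them as "m34 & 0xf" and
 * "(m34 >> 4) & 0xf".
 */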
1856 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1858 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1860 if (!m34) {
1861 return DISAS_NORETURN;
1863 gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1864 tcg_temp_free_i32(m34);
1865 gen_set_cc_nz_f32(s, o->in2);
1866 return DISAS_NEXT;
1869 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1871 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1873 if (!m34) {
1874 return DISAS_NORETURN;
1876 gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1877 tcg_temp_free_i32(m34);
1878 gen_set_cc_nz_f64(s, o->in2);
1879 return DISAS_NEXT;
1882 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1884 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1886 if (!m34) {
1887 return DISAS_NORETURN;
1889 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
1890 tcg_temp_free_i32(m34);
1891 gen_set_cc_nz_f128(s, o->in1, o->in2);
1892 return DISAS_NEXT;
1895 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1897 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1899 if (!m34) {
1900 return DISAS_NORETURN;
1902 gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1903 tcg_temp_free_i32(m34);
1904 gen_set_cc_nz_f32(s, o->in2);
1905 return DISAS_NEXT;
1908 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1910 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1912 if (!m34) {
1913 return DISAS_NORETURN;
1915 gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1916 tcg_temp_free_i32(m34);
1917 gen_set_cc_nz_f64(s, o->in2);
1918 return DISAS_NEXT;
1921 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1923 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1925 if (!m34) {
1926 return DISAS_NORETURN;
1928 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
1929 tcg_temp_free_i32(m34);
1930 gen_set_cc_nz_f128(s, o->in1, o->in2);
1931 return DISAS_NEXT;
1934 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1936 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1938 if (!m34) {
1939 return DISAS_NORETURN;
1941 gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1942 tcg_temp_free_i32(m34);
1943 gen_set_cc_nz_f32(s, o->in2);
1944 return DISAS_NEXT;
1947 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1949 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1951 if (!m34) {
1952 return DISAS_NORETURN;
1954 gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1955 tcg_temp_free_i32(m34);
1956 gen_set_cc_nz_f64(s, o->in2);
1957 return DISAS_NEXT;
1960 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1962 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1964 if (!m34) {
1965 return DISAS_NORETURN;
1967 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
1968 tcg_temp_free_i32(m34);
1969 gen_set_cc_nz_f128(s, o->in1, o->in2);
1970 return DISAS_NEXT;
1973 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1975 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1977 if (!m34) {
1978 return DISAS_NORETURN;
1980 gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1981 tcg_temp_free_i32(m34);
1982 gen_set_cc_nz_f32(s, o->in2);
1983 return DISAS_NEXT;
1986 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1988 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1990 if (!m34) {
1991 return DISAS_NORETURN;
1993 gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1994 tcg_temp_free_i32(m34);
1995 gen_set_cc_nz_f64(s, o->in2);
1996 return DISAS_NEXT;
1999 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
2001 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2003 if (!m34) {
2004 return DISAS_NORETURN;
2006 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
2007 tcg_temp_free_i32(m34);
2008 gen_set_cc_nz_f128(s, o->in1, o->in2);
2009 return DISAS_NEXT;
2012 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
2014 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2016 if (!m34) {
2017 return DISAS_NORETURN;
2019 gen_helper_cegb(o->out, cpu_env, o->in2, m34);
2020 tcg_temp_free_i32(m34);
2021 return DISAS_NEXT;
2024 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
2026 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2028 if (!m34) {
2029 return DISAS_NORETURN;
2031 gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
2032 tcg_temp_free_i32(m34);
2033 return DISAS_NEXT;
2036 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
2038 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2040 if (!m34) {
2041 return DISAS_NORETURN;
2043 gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
2044 tcg_temp_free_i32(m34);
2045 return_low128(o->out2);
2046 return DISAS_NEXT;
2049 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2051 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2053 if (!m34) {
2054 return DISAS_NORETURN;
2056 gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2057 tcg_temp_free_i32(m34);
2058 return DISAS_NEXT;
2061 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
2063 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2065 if (!m34) {
2066 return DISAS_NORETURN;
2068 gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2069 tcg_temp_free_i32(m34);
2070 return DISAS_NEXT;
2073 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2075 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2077 if (!m34) {
2078 return DISAS_NORETURN;
2080 gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
2081 tcg_temp_free_i32(m34);
2082 return_low128(o->out2);
2083 return DISAS_NEXT;
2086 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2088 int r2 = get_field(s, r2);
2089 TCGv_i64 len = tcg_temp_new_i64();
2091 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2092 set_cc_static(s);
2093 return_low128(o->out);
2095 tcg_gen_add_i64(regs[r2], regs[r2], len);
2096 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2097 tcg_temp_free_i64(len);
2099 return DISAS_NEXT;
2102 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2104 int l = get_field(s, l1);
2105 TCGv_i32 vl;
2107 switch (l + 1) {
2108 case 1:
2109 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2110 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2111 break;
2112 case 2:
2113 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2114 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2115 break;
2116 case 4:
2117 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2118 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2119 break;
2120 case 8:
2121 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2122 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2123 break;
2124 default:
2125 vl = tcg_const_i32(l);
2126 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2127 tcg_temp_free_i32(vl);
2128 set_cc_static(s);
2129 return DISAS_NEXT;
2131 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2132 return DISAS_NEXT;
2135 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2137 int r1 = get_field(s, r1);
2138 int r2 = get_field(s, r2);
2139 TCGv_i32 t1, t2;
2141 /* r1 and r2 must be even. */
2142 if (r1 & 1 || r2 & 1) {
2143 gen_program_exception(s, PGM_SPECIFICATION);
2144 return DISAS_NORETURN;
2147 t1 = tcg_const_i32(r1);
2148 t2 = tcg_const_i32(r2);
2149 gen_helper_clcl(cc_op, cpu_env, t1, t2);
2150 tcg_temp_free_i32(t1);
2151 tcg_temp_free_i32(t2);
2152 set_cc_static(s);
2153 return DISAS_NEXT;
2156 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2158 int r1 = get_field(s, r1);
2159 int r3 = get_field(s, r3);
2160 TCGv_i32 t1, t3;
2162 /* r1 and r3 must be even. */
2163 if (r1 & 1 || r3 & 1) {
2164 gen_program_exception(s, PGM_SPECIFICATION);
2165 return DISAS_NORETURN;
2168 t1 = tcg_const_i32(r1);
2169 t3 = tcg_const_i32(r3);
2170 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2171 tcg_temp_free_i32(t1);
2172 tcg_temp_free_i32(t3);
2173 set_cc_static(s);
2174 return DISAS_NEXT;
2177 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2179 int r1 = get_field(s, r1);
2180 int r3 = get_field(s, r3);
2181 TCGv_i32 t1, t3;
2183 /* r1 and r3 must be even. */
2184 if (r1 & 1 || r3 & 1) {
2185 gen_program_exception(s, PGM_SPECIFICATION);
2186 return DISAS_NORETURN;
2189 t1 = tcg_const_i32(r1);
2190 t3 = tcg_const_i32(r3);
2191 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2192 tcg_temp_free_i32(t1);
2193 tcg_temp_free_i32(t3);
2194 set_cc_static(s);
2195 return DISAS_NEXT;
2198 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2200 TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
2201 TCGv_i32 t1 = tcg_temp_new_i32();
2202 tcg_gen_extrl_i64_i32(t1, o->in1);
2203 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2204 set_cc_static(s);
2205 tcg_temp_free_i32(t1);
2206 tcg_temp_free_i32(m3);
2207 return DISAS_NEXT;
2210 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2212 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2213 set_cc_static(s);
2214 return_low128(o->in2);
2215 return DISAS_NEXT;
2218 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2220 TCGv_i64 t = tcg_temp_new_i64();
2221 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2222 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2223 tcg_gen_or_i64(o->out, o->out, t);
2224 tcg_temp_free_i64(t);
2225 return DISAS_NEXT;
2228 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2230 int d2 = get_field(s, d2);
2231 int b2 = get_field(s, b2);
2232 TCGv_i64 addr, cc;
2234 /* Note that in1 = R3 (new value) and
2235 in2 = (zero-extended) R1 (expected value). */
2237 addr = get_address(s, 0, b2, d2);
2238 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2239 get_mem_index(s), s->insn->data | MO_ALIGN);
2240 tcg_temp_free_i64(addr);
2242 /* Are the memory and expected values (un)equal? Note that this setcond
2243 produces the output CC value, thus the NE sense of the test. */
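/* E.g. if the operand in memory matched the expected value, the new
   value was stored and the setcond yields 0 (CC = 0); otherwise
   o->out now holds the current memory contents and CC = 1. */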
2244 cc = tcg_temp_new_i64();
2245 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2246 tcg_gen_extrl_i64_i32(cc_op, cc);
2247 tcg_temp_free_i64(cc);
2248 set_cc_static(s);
2250 return DISAS_NEXT;
2253 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2255 int r1 = get_field(s, r1);
2256 int r3 = get_field(s, r3);
2257 int d2 = get_field(s, d2);
2258 int b2 = get_field(s, b2);
2259 DisasJumpType ret = DISAS_NEXT;
2260 TCGv_i64 addr;
2261 TCGv_i32 t_r1, t_r3;
2263 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
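/* A serial context can use the plain helper. A parallel context needs
   a genuine 128-bit cmpxchg; if the host cannot provide one,
   exit_atomic restarts the instruction with all other CPUs stopped. */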
2264 addr = get_address(s, 0, b2, d2);
2265 t_r1 = tcg_const_i32(r1);
2266 t_r3 = tcg_const_i32(r3);
2267 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2268 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2269 } else if (HAVE_CMPXCHG128) {
2270 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2271 } else {
2272 gen_helper_exit_atomic(cpu_env);
2273 ret = DISAS_NORETURN;
2275 tcg_temp_free_i64(addr);
2276 tcg_temp_free_i32(t_r1);
2277 tcg_temp_free_i32(t_r3);
2279 set_cc_static(s);
2280 return ret;
2283 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2285 int r3 = get_field(s, r3);
2286 TCGv_i32 t_r3 = tcg_const_i32(r3);
2288 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2289 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2290 } else {
2291 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2293 tcg_temp_free_i32(t_r3);
2295 set_cc_static(s);
2296 return DISAS_NEXT;
2299 #ifndef CONFIG_USER_ONLY
2300 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2302 MemOp mop = s->insn->data;
2303 TCGv_i64 addr, old, cc;
2304 TCGLabel *lab = gen_new_label();
2306 /* Note that in1 = R1 (zero-extended expected value),
2307 out = R1 (original reg), out2 = R1+1 (new value). */
2309 addr = tcg_temp_new_i64();
2310 old = tcg_temp_new_i64();
2311 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2312 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2313 get_mem_index(s), mop | MO_ALIGN);
2314 tcg_temp_free_i64(addr);
2316 /* Are the memory and expected values (un)equal? */
2317 cc = tcg_temp_new_i64();
2318 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2319 tcg_gen_extrl_i64_i32(cc_op, cc);
2321 /* Write back the output now, so that it happens before the
2322 following branch, so that we don't need local temps. */
2323 if ((mop & MO_SIZE) == MO_32) {
2324 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2325 } else {
2326 tcg_gen_mov_i64(o->out, old);
2328 tcg_temp_free_i64(old);
2330 /* If the comparison was equal, and the LSB of R2 was set,
2331 then we need to flush the TLB (for all cpus). */
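/* I.e. cc == 0 (equal) is flipped to 1 by the xori, then ANDed with
   R2, so only the low bit of R2 is tested: the branch skips the purge
   unless the compare succeeded and that bit is set. */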
2332 tcg_gen_xori_i64(cc, cc, 1);
2333 tcg_gen_and_i64(cc, cc, o->in2);
2334 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2335 tcg_temp_free_i64(cc);
2337 gen_helper_purge(cpu_env);
2338 gen_set_label(lab);
2340 return DISAS_NEXT;
2342 #endif
2344 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2346 TCGv_i64 t1 = tcg_temp_new_i64();
2347 TCGv_i32 t2 = tcg_temp_new_i32();
2348 tcg_gen_extrl_i64_i32(t2, o->in1);
2349 gen_helper_cvd(t1, t2);
2350 tcg_temp_free_i32(t2);
2351 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2352 tcg_temp_free_i64(t1);
2353 return DISAS_NEXT;
2356 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2358 int m3 = get_field(s, m3);
2359 TCGLabel *lab = gen_new_label();
2360 TCGCond c;
2362 c = tcg_invert_cond(ltgt_cond[m3]);
2363 if (s->insn->data) {
2364 c = tcg_unsigned_cond(c);
2366 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2368 /* Trap. */
2369 gen_trap(s);
2371 gen_set_label(lab);
2372 return DISAS_NEXT;
2375 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2377 int m3 = get_field(s, m3);
2378 int r1 = get_field(s, r1);
2379 int r2 = get_field(s, r2);
2380 TCGv_i32 tr1, tr2, chk;
2382 /* R1 and R2 must both be even. */
2383 if ((r1 | r2) & 1) {
2384 gen_program_exception(s, PGM_SPECIFICATION);
2385 return DISAS_NORETURN;
2387 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2388 m3 = 0;
2391 tr1 = tcg_const_i32(r1);
2392 tr2 = tcg_const_i32(r2);
2393 chk = tcg_const_i32(m3);
2395 switch (s->insn->data) {
2396 case 12:
2397 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2398 break;
2399 case 14:
2400 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2401 break;
2402 case 21:
2403 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2404 break;
2405 case 24:
2406 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2407 break;
2408 case 41:
2409 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2410 break;
2411 case 42:
2412 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2413 break;
2414 default:
2415 g_assert_not_reached();
2418 tcg_temp_free_i32(tr1);
2419 tcg_temp_free_i32(tr2);
2420 tcg_temp_free_i32(chk);
2421 set_cc_static(s);
2422 return DISAS_NEXT;
2425 #ifndef CONFIG_USER_ONLY
2426 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2428 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2429 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2430 TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2432 gen_helper_diag(cpu_env, r1, r3, func_code);
2434 tcg_temp_free_i32(func_code);
2435 tcg_temp_free_i32(r3);
2436 tcg_temp_free_i32(r1);
2437 return DISAS_NEXT;
2439 #endif
2441 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2443 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2444 return_low128(o->out);
2445 return DISAS_NEXT;
2448 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2450 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2451 return_low128(o->out);
2452 return DISAS_NEXT;
2455 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2457 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2458 return_low128(o->out);
2459 return DISAS_NEXT;
2462 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2464 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2465 return_low128(o->out);
2466 return DISAS_NEXT;
2469 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2471 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2472 return DISAS_NEXT;
2475 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2477 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2478 return DISAS_NEXT;
2481 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2483 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2484 return_low128(o->out2);
2485 return DISAS_NEXT;
2488 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2490 int r2 = get_field(s, r2);
2491 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2492 return DISAS_NEXT;
2495 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2497 /* No cache information provided. */
2498 tcg_gen_movi_i64(o->out, -1);
2499 return DISAS_NEXT;
2502 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2504 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2505 return DISAS_NEXT;
2508 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2510 int r1 = get_field(s, r1);
2511 int r2 = get_field(s, r2);
2512 TCGv_i64 t = tcg_temp_new_i64();
2514 /* Note the "subsequently" in the PoO, which implies a defined result
2515 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2516 tcg_gen_shri_i64(t, psw_mask, 32);
2517 store_reg32_i64(r1, t);
2518 if (r2 != 0) {
2519 store_reg32_i64(r2, psw_mask);
2522 tcg_temp_free_i64(t);
2523 return DISAS_NEXT;
2526 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2528 int r1 = get_field(s, r1);
2529 TCGv_i32 ilen;
2530 TCGv_i64 v1;
2532 /* Nested EXECUTE is not allowed. */
2533 if (unlikely(s->ex_value)) {
2534 gen_program_exception(s, PGM_EXECUTE);
2535 return DISAS_NORETURN;
2538 update_psw_addr(s);
2539 update_cc_op(s);
2541 if (r1 == 0) {
2542 v1 = tcg_const_i64(0);
2543 } else {
2544 v1 = regs[r1];
2547 ilen = tcg_const_i32(s->ilen);
2548 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2549 tcg_temp_free_i32(ilen);
2551 if (r1 == 0) {
2552 tcg_temp_free_i64(v1);
2555 return DISAS_PC_CC_UPDATED;
2558 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2560 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2562 if (!m34) {
2563 return DISAS_NORETURN;
2565 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2566 tcg_temp_free_i32(m34);
2567 return DISAS_NEXT;
2570 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2572 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2574 if (!m34) {
2575 return DISAS_NORETURN;
2577 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2578 tcg_temp_free_i32(m34);
2579 return DISAS_NEXT;
2582 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2584 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2586 if (!m34) {
2587 return DISAS_NORETURN;
2589 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2590 return_low128(o->out2);
2591 tcg_temp_free_i32(m34);
2592 return DISAS_NEXT;
2595 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2597 /* We'll use the original input for cc computation, since we get to
2598 compare that against 0, which ought to be better than comparing
2599 the real output against 64. It also lets cc_dst be a convenient
2600 temporary during our computation. */
2601 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2603 /* R1 = IN ? CLZ(IN) : 64. */
2604 tcg_gen_clzi_i64(o->out, o->in2, 64);
2606 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2607 value by 64, which is undefined. But since the shift is 64 iff the
2608 input is zero, we still get the correct result after and'ing. */
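/* Worked example: IN = 0x0010000000000001 gives CLZ = 11, so R1 = 11;
   0x8000000000000000 >> 11 is exactly the found bit, and the andc
   clears it, leaving R1+1 = 0x0000000000000001. */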
2609 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2610 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2611 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2612 return DISAS_NEXT;
2615 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2617 int m3 = get_field(s, m3);
2618 int pos, len, base = s->insn->data;
2619 TCGv_i64 tmp = tcg_temp_new_i64();
2620 uint64_t ccm;
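/* The m3 mask selects which bytes of the 32-bit field (at bit offset
   `base') are replaced by successive bytes from memory; e.g.
   m3 = 0x5 (0b0101) loads two bytes into byte positions 1 and 3.
   `ccm' accumulates the mask of modified bits for the CC. */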
2622 switch (m3) {
2623 case 0xf:
2624 /* Effectively a 32-bit load. */
2625 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2626 len = 32;
2627 goto one_insert;
2629 case 0xc:
2630 case 0x6:
2631 case 0x3:
2632 /* Effectively a 16-bit load. */
2633 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2634 len = 16;
2635 goto one_insert;
2637 case 0x8:
2638 case 0x4:
2639 case 0x2:
2640 case 0x1:
2641 /* Effectively an 8-bit load. */
2642 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2643 len = 8;
2644 goto one_insert;
2646 one_insert:
2647 pos = base + ctz32(m3) * 8;
2648 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2649 ccm = ((1ull << len) - 1) << pos;
2650 break;
2652 default:
2653 /* This is going to be a sequence of loads and inserts. */
2654 pos = base + 32 - 8;
2655 ccm = 0;
2656 while (m3) {
2657 if (m3 & 0x8) {
2658 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2659 tcg_gen_addi_i64(o->in2, o->in2, 1);
2660 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2661 ccm |= 0xffull << pos; /* 0xffull: pos can reach 56 for ICMH */
2663 m3 = (m3 << 1) & 0xf;
2664 pos -= 8;
2666 break;
2669 tcg_gen_movi_i64(tmp, ccm);
2670 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2671 tcg_temp_free_i64(tmp);
2672 return DISAS_NEXT;
2675 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2677 int shift = s->insn->data & 0xff;
2678 int size = s->insn->data >> 8;
2679 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2680 return DISAS_NEXT;
2683 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2685 TCGv_i64 t1, t2;
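/* IPM: within byte 3 of R1 (bits 24-31 here), the CC lands in bits
   28-29 and the program mask in bits 24-27; that is PoO bits 34-35
   and 36-39 of R1, with PoO bits 32-33 zeroed. */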
2687 gen_op_calc_cc(s);
2688 t1 = tcg_temp_new_i64();
2689 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2690 t2 = tcg_temp_new_i64();
2691 tcg_gen_extu_i32_i64(t2, cc_op);
2692 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2693 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2694 tcg_temp_free_i64(t1);
2695 tcg_temp_free_i64(t2);
2696 return DISAS_NEXT;
2699 #ifndef CONFIG_USER_ONLY
2700 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2702 TCGv_i32 m4;
2704 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2705 m4 = tcg_const_i32(get_field(s, m4));
2706 } else {
2707 m4 = tcg_const_i32(0);
2709 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2710 tcg_temp_free_i32(m4);
2711 return DISAS_NEXT;
2714 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2716 TCGv_i32 m4;
2718 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2719 m4 = tcg_const_i32(get_field(s, m4));
2720 } else {
2721 m4 = tcg_const_i32(0);
2723 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2724 tcg_temp_free_i32(m4);
2725 return DISAS_NEXT;
2728 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2730 gen_helper_iske(o->out, cpu_env, o->in2);
2731 return DISAS_NEXT;
2733 #endif
2735 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2737 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2738 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2739 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2740 TCGv_i32 t_r1, t_r2, t_r3, type;
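/* The specification checks cascade via fall-through, so e.g. KMA must
   satisfy every constraint below it: r3 distinct from r1 and r2, and
   r1, r2, r3 all nonzero even register pairs. */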
2742 switch (s->insn->data) {
2743 case S390_FEAT_TYPE_KMA:
2744 if (r3 == r1 || r3 == r2) {
2745 gen_program_exception(s, PGM_SPECIFICATION);
2746 return DISAS_NORETURN;
2748 /* FALL THROUGH */
2749 case S390_FEAT_TYPE_KMCTR:
2750 if (r3 & 1 || !r3) {
2751 gen_program_exception(s, PGM_SPECIFICATION);
2752 return DISAS_NORETURN;
2754 /* FALL THROUGH */
2755 case S390_FEAT_TYPE_PPNO:
2756 case S390_FEAT_TYPE_KMF:
2757 case S390_FEAT_TYPE_KMC:
2758 case S390_FEAT_TYPE_KMO:
2759 case S390_FEAT_TYPE_KM:
2760 if (r1 & 1 || !r1) {
2761 gen_program_exception(s, PGM_SPECIFICATION);
2762 return DISAS_NORETURN;
2764 /* FALL THROUGH */
2765 case S390_FEAT_TYPE_KMAC:
2766 case S390_FEAT_TYPE_KIMD:
2767 case S390_FEAT_TYPE_KLMD:
2768 if (r2 & 1 || !r2) {
2769 gen_program_exception(s, PGM_SPECIFICATION);
2770 return DISAS_NORETURN;
2772 /* FALL THROUGH */
2773 case S390_FEAT_TYPE_PCKMO:
2774 case S390_FEAT_TYPE_PCC:
2775 break;
2776 default:
2777 g_assert_not_reached();
2780 t_r1 = tcg_const_i32(r1);
2781 t_r2 = tcg_const_i32(r2);
2782 t_r3 = tcg_const_i32(r3);
2783 type = tcg_const_i32(s->insn->data);
2784 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2785 set_cc_static(s);
2786 tcg_temp_free_i32(t_r1);
2787 tcg_temp_free_i32(t_r2);
2788 tcg_temp_free_i32(t_r3);
2789 tcg_temp_free_i32(type);
2790 return DISAS_NEXT;
2793 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2795 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2796 set_cc_static(s);
2797 return DISAS_NEXT;
2800 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2802 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2803 set_cc_static(s);
2804 return DISAS_NEXT;
2807 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2809 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2810 set_cc_static(s);
2811 return DISAS_NEXT;
2814 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2816 /* The real output is the original value in memory; the atomic
2817 fetch-and-add returns the pre-update contents. */
2818 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2819 s->insn->data | MO_ALIGN);
2820 /* However, we need to recompute the addition for setting CC. */
2821 tcg_gen_add_i64(o->out, o->in1, o->in2);
2822 return DISAS_NEXT;
2825 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2827 /* The real output is the original value in memory; the atomic
2828 fetch-and-and returns the pre-update contents. */
2829 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2830 s->insn->data | MO_ALIGN);
2831 /* However, we need to recompute the operation for setting CC. */
2832 tcg_gen_and_i64(o->out, o->in1, o->in2);
2833 return DISAS_NEXT;
2836 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2838 /* The real output is the original value in memory; the atomic
2839 fetch-and-or returns the pre-update contents. */
2840 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2841 s->insn->data | MO_ALIGN);
2842 /* However, we need to recompute the operation for setting CC. */
2843 tcg_gen_or_i64(o->out, o->in1, o->in2);
2844 return DISAS_NEXT;
2847 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2849 /* The real output is the original value in memory; the atomic
2850 fetch-and-xor returns the pre-update contents. */
2851 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2852 s->insn->data | MO_ALIGN);
2853 /* However, we need to recompute the operation for setting CC. */
2854 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2855 return DISAS_NEXT;
2858 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2860 gen_helper_ldeb(o->out, cpu_env, o->in2);
2861 return DISAS_NEXT;
2864 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2866 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2868 if (!m34) {
2869 return DISAS_NORETURN;
2871 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2872 tcg_temp_free_i32(m34);
2873 return DISAS_NEXT;
2876 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2878 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2880 if (!m34) {
2881 return DISAS_NORETURN;
2883 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2884 tcg_temp_free_i32(m34);
2885 return DISAS_NEXT;
2888 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2890 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2892 if (!m34) {
2893 return DISAS_NORETURN;
2895 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2896 tcg_temp_free_i32(m34);
2897 return DISAS_NEXT;
2900 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2902 gen_helper_lxdb(o->out, cpu_env, o->in2);
2903 return_low128(o->out2);
2904 return DISAS_NEXT;
2907 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2909 gen_helper_lxeb(o->out, cpu_env, o->in2);
2910 return_low128(o->out2);
2911 return DISAS_NEXT;
2914 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2916 tcg_gen_shli_i64(o->out, o->in2, 32);
2917 return DISAS_NEXT;
2920 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2922 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2923 return DISAS_NEXT;
2926 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2928 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2929 return DISAS_NEXT;
2932 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2934 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2935 return DISAS_NEXT;
2938 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2940 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2941 return DISAS_NEXT;
2944 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2946 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2947 return DISAS_NEXT;
2950 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2952 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2953 return DISAS_NEXT;
2956 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2958 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2959 return DISAS_NEXT;
2962 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2964 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2965 return DISAS_NEXT;
2968 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2970 TCGLabel *lab = gen_new_label();
2971 store_reg32_i64(get_field(s, r1), o->in2);
2972 /* The value is stored even in case of trap. */
2973 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2974 gen_trap(s);
2975 gen_set_label(lab);
2976 return DISAS_NEXT;
2979 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2981 TCGLabel *lab = gen_new_label();
2982 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2983 /* The value is stored even in case of trap. */
2984 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2985 gen_trap(s);
2986 gen_set_label(lab);
2987 return DISAS_NEXT;
2990 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2992 TCGLabel *lab = gen_new_label();
2993 store_reg32h_i64(get_field(s, r1), o->in2);
2994 /* The value is stored even in case of trap. */
2995 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2996 gen_trap(s);
2997 gen_set_label(lab);
2998 return DISAS_NEXT;
3001 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
3003 TCGLabel *lab = gen_new_label();
3004 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
3005 /* The value is stored even in case of trap. */
3006 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
3007 gen_trap(s);
3008 gen_set_label(lab);
3009 return DISAS_NEXT;
3012 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
3014 TCGLabel *lab = gen_new_label();
3015 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
3016 /* The value is stored even in case of trap. */
3017 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
3018 gen_trap(s);
3019 gen_set_label(lab);
3020 return DISAS_NEXT;
3023 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
3025 DisasCompare c;
3027 disas_jcc(s, &c, get_field(s, m3));
3029 if (c.is_64) {
3030 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
3031 o->in2, o->in1);
3032 free_compare(&c);
3033 } else {
3034 TCGv_i32 t32 = tcg_temp_new_i32();
3035 TCGv_i64 t, z;
3037 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3038 free_compare(&c);
3040 t = tcg_temp_new_i64();
3041 tcg_gen_extu_i32_i64(t, t32);
3042 tcg_temp_free_i32(t32);
3044 z = tcg_const_i64(0);
3045 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3046 tcg_temp_free_i64(t);
3047 tcg_temp_free_i64(z);
3050 return DISAS_NEXT;
3053 #ifndef CONFIG_USER_ONLY
3054 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3056 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3057 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3058 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3059 tcg_temp_free_i32(r1);
3060 tcg_temp_free_i32(r3);
3061 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3062 return DISAS_PC_STALE_NOCHAIN;
3065 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3067 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3068 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3069 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3070 tcg_temp_free_i32(r1);
3071 tcg_temp_free_i32(r3);
3072 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3073 return DISAS_PC_STALE_NOCHAIN;
3076 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3078 gen_helper_lra(o->out, cpu_env, o->in2);
3079 set_cc_static(s);
3080 return DISAS_NEXT;
3083 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3085 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3086 return DISAS_NEXT;
3089 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3091 TCGv_i64 t1, t2;
3093 per_breaking_event(s);
3095 t1 = tcg_temp_new_i64();
3096 t2 = tcg_temp_new_i64();
3097 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3098 MO_TEUL | MO_ALIGN_8);
3099 tcg_gen_addi_i64(o->in2, o->in2, 4);
3100 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3101 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3102 tcg_gen_shli_i64(t1, t1, 32);
3103 gen_helper_load_psw(cpu_env, t1, t2);
3104 tcg_temp_free_i64(t1);
3105 tcg_temp_free_i64(t2);
3106 return DISAS_NORETURN;
3109 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3111 TCGv_i64 t1, t2;
3113 per_breaking_event(s);
3115 t1 = tcg_temp_new_i64();
3116 t2 = tcg_temp_new_i64();
3117 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3118 MO_TEQ | MO_ALIGN_8);
3119 tcg_gen_addi_i64(o->in2, o->in2, 8);
3120 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3121 gen_helper_load_psw(cpu_env, t1, t2);
3122 tcg_temp_free_i64(t1);
3123 tcg_temp_free_i64(t2);
3124 return DISAS_NORETURN;
3126 #endif
3128 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3130 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3131 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3132 gen_helper_lam(cpu_env, r1, o->in2, r3);
3133 tcg_temp_free_i32(r1);
3134 tcg_temp_free_i32(r3);
3135 return DISAS_NEXT;
3138 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3140 int r1 = get_field(s, r1);
3141 int r3 = get_field(s, r3);
3142 TCGv_i64 t1, t2;
3144 /* Only one register to read. */
3145 t1 = tcg_temp_new_i64();
3146 if (unlikely(r1 == r3)) {
3147 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3148 store_reg32_i64(r1, t1);
3149 tcg_temp_free(t1);
3150 return DISAS_NEXT;
3153 /* First load the values of the first and last registers to trigger
3154 possible page faults. */
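/* Register numbers wrap modulo 16: e.g. LM %r14,%r1,... loads r14,
   r15, r0, r1, which is what the (r3 - r1) & 15 arithmetic handles. */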
3155 t2 = tcg_temp_new_i64();
3156 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3157 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3158 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3159 store_reg32_i64(r1, t1);
3160 store_reg32_i64(r3, t2);
3162 /* Only two registers to read. */
3163 if (((r1 + 1) & 15) == r3) {
3164 tcg_temp_free(t2);
3165 tcg_temp_free(t1);
3166 return DISAS_NEXT;
3169 /* Then load the remaining registers. Page fault can't occur. */
3170 r3 = (r3 - 1) & 15;
3171 tcg_gen_movi_i64(t2, 4);
3172 while (r1 != r3) {
3173 r1 = (r1 + 1) & 15;
3174 tcg_gen_add_i64(o->in2, o->in2, t2);
3175 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3176 store_reg32_i64(r1, t1);
3178 tcg_temp_free(t2);
3179 tcg_temp_free(t1);
3181 return DISAS_NEXT;
3184 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3186 int r1 = get_field(s, r1);
3187 int r3 = get_field(s, r3);
3188 TCGv_i64 t1, t2;
3190 /* Only one register to read. */
3191 t1 = tcg_temp_new_i64();
3192 if (unlikely(r1 == r3)) {
3193 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3194 store_reg32h_i64(r1, t1);
3195 tcg_temp_free(t1);
3196 return DISAS_NEXT;
3199 /* First load the values of the first and last registers to trigger
3200 possible page faults. */
3201 t2 = tcg_temp_new_i64();
3202 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3203 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3204 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3205 store_reg32h_i64(r1, t1);
3206 store_reg32h_i64(r3, t2);
3208 /* Only two registers to read. */
3209 if (((r1 + 1) & 15) == r3) {
3210 tcg_temp_free(t2);
3211 tcg_temp_free(t1);
3212 return DISAS_NEXT;
3215 /* Then load the remaining registers. Page fault can't occur. */
3216 r3 = (r3 - 1) & 15;
3217 tcg_gen_movi_i64(t2, 4);
3218 while (r1 != r3) {
3219 r1 = (r1 + 1) & 15;
3220 tcg_gen_add_i64(o->in2, o->in2, t2);
3221 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3222 store_reg32h_i64(r1, t1);
3224 tcg_temp_free(t2);
3225 tcg_temp_free(t1);
3227 return DISAS_NEXT;
3230 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3232 int r1 = get_field(s, r1);
3233 int r3 = get_field(s, r3);
3234 TCGv_i64 t1, t2;
3236 /* Only one register to read. */
3237 if (unlikely(r1 == r3)) {
3238 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3239 return DISAS_NEXT;
3242 /* First load the values of the first and last registers to trigger
3243 possible page faults. */
3244 t1 = tcg_temp_new_i64();
3245 t2 = tcg_temp_new_i64();
3246 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3247 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3248 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3249 tcg_gen_mov_i64(regs[r1], t1);
3250 tcg_temp_free(t2);
3252 /* Only two registers to read. */
3253 if (((r1 + 1) & 15) == r3) {
3254 tcg_temp_free(t1);
3255 return DISAS_NEXT;
3258 /* Then load the remaining registers. Page fault can't occur. */
3259 r3 = (r3 - 1) & 15;
3260 tcg_gen_movi_i64(t1, 8);
3261 while (r1 != r3) {
3262 r1 = (r1 + 1) & 15;
3263 tcg_gen_add_i64(o->in2, o->in2, t1);
3264 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3266 tcg_temp_free(t1);
3268 return DISAS_NEXT;
3271 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3273 TCGv_i64 a1, a2;
3274 MemOp mop = s->insn->data;
3276 /* In a parallel context, stop the world and single step. */
3277 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3278 update_psw_addr(s);
3279 update_cc_op(s);
3280 gen_exception(EXCP_ATOMIC);
3281 return DISAS_NORETURN;
3284 /* In a serial context, perform the two loads ... */
3285 a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3286 a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3287 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3288 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3289 tcg_temp_free_i64(a1);
3290 tcg_temp_free_i64(a2);
3292 /* ... and indicate that we performed them while interlocked. */
3293 gen_op_movi_cc(s, 0);
3294 return DISAS_NEXT;
3297 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3299 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3300 gen_helper_lpq(o->out, cpu_env, o->in2);
3301 } else if (HAVE_ATOMIC128) {
3302 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3303 } else {
3304 gen_helper_exit_atomic(cpu_env);
3305 return DISAS_NORETURN;
3307 return_low128(o->out2);
3308 return DISAS_NEXT;
3311 #ifndef CONFIG_USER_ONLY
3312 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3314 o->addr1 = get_address(s, 0, get_field(s, r2), 0);
3315 tcg_gen_qemu_ld_tl(o->out, o->addr1, MMU_REAL_IDX, s->insn->data);
3316 return DISAS_NEXT;
3318 #endif
3320 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3322 tcg_gen_andi_i64(o->out, o->in2, -256);
3323 return DISAS_NEXT;
3326 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3328 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3330 if (get_field(s, m3) > 6) {
3331 gen_program_exception(s, PGM_SPECIFICATION);
3332 return DISAS_NORETURN;
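/* The OR/NEG pair computes the distance to the next block boundary:
   e.g. with a 64-byte block (m3 == 0) and addr % 64 == 61,
   (addr | -64) == -3 and the negation yields 3; the result is then
   capped at 16 by the umin. */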
3335 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3336 tcg_gen_neg_i64(o->addr1, o->addr1);
3337 tcg_gen_movi_i64(o->out, 16);
3338 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3339 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3340 return DISAS_NEXT;
3343 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3345 #if !defined(CONFIG_USER_ONLY)
3346 TCGv_i32 i2;
3347 #endif
3348 const uint16_t monitor_class = get_field(s, i2);
3350 if (monitor_class & 0xff00) {
3351 gen_program_exception(s, PGM_SPECIFICATION);
3352 return DISAS_NORETURN;
3355 #if !defined(CONFIG_USER_ONLY)
3356 i2 = tcg_const_i32(monitor_class);
3357 gen_helper_monitor_call(cpu_env, o->addr1, i2);
3358 tcg_temp_free_i32(i2);
3359 #endif
3360 /* Defaults to a NOP. */
3361 return DISAS_NEXT;
3364 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3366 o->out = o->in2;
3367 o->g_out = o->g_in2;
3368 o->in2 = NULL;
3369 o->g_in2 = false;
3370 return DISAS_NEXT;
3373 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3375 int b2 = get_field(s, b2);
3376 TCGv ar1 = tcg_temp_new_i64();
3378 o->out = o->in2;
3379 o->g_out = o->g_in2;
3380 o->in2 = NULL;
3381 o->g_in2 = false;
3383 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3384 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3385 tcg_gen_movi_i64(ar1, 0);
3386 break;
3387 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3388 tcg_gen_movi_i64(ar1, 1);
3389 break;
3390 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3391 if (b2) {
3392 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3393 } else {
3394 tcg_gen_movi_i64(ar1, 0);
3396 break;
3397 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3398 tcg_gen_movi_i64(ar1, 2);
3399 break;
3402 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3403 tcg_temp_free_i64(ar1);
3405 return DISAS_NEXT;
3408 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3410 o->out = o->in1;
3411 o->out2 = o->in2;
3412 o->g_out = o->g_in1;
3413 o->g_out2 = o->g_in2;
3414 o->in1 = NULL;
3415 o->in2 = NULL;
3416 o->g_in1 = o->g_in2 = false;
3417 return DISAS_NEXT;
3420 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3422 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3423 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3424 tcg_temp_free_i32(l);
3425 return DISAS_NEXT;
3428 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3430 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3431 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3432 tcg_temp_free_i32(l);
3433 return DISAS_NEXT;
3436 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3438 int r1 = get_field(s, r1);
3439 int r2 = get_field(s, r2);
3440 TCGv_i32 t1, t2;
3442 /* r1 and r2 must be even. */
3443 if (r1 & 1 || r2 & 1) {
3444 gen_program_exception(s, PGM_SPECIFICATION);
3445 return DISAS_NORETURN;
3448 t1 = tcg_const_i32(r1);
3449 t2 = tcg_const_i32(r2);
3450 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3451 tcg_temp_free_i32(t1);
3452 tcg_temp_free_i32(t2);
3453 set_cc_static(s);
3454 return DISAS_NEXT;
3457 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3459 int r1 = get_field(s, r1);
3460 int r3 = get_field(s, r3);
3461 TCGv_i32 t1, t3;
3463 /* r1 and r3 must be even. */
3464 if (r1 & 1 || r3 & 1) {
3465 gen_program_exception(s, PGM_SPECIFICATION);
3466 return DISAS_NORETURN;
3469 t1 = tcg_const_i32(r1);
3470 t3 = tcg_const_i32(r3);
3471 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3472 tcg_temp_free_i32(t1);
3473 tcg_temp_free_i32(t3);
3474 set_cc_static(s);
3475 return DISAS_NEXT;
3478 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3480 int r1 = get_field(s, r1);
3481 int r3 = get_field(s, r3);
3482 TCGv_i32 t1, t3;
3484 /* r1 and r3 must be even. */
3485 if (r1 & 1 || r3 & 1) {
3486 gen_program_exception(s, PGM_SPECIFICATION);
3487 return DISAS_NORETURN;
3490 t1 = tcg_const_i32(r1);
3491 t3 = tcg_const_i32(r3);
3492 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3493 tcg_temp_free_i32(t1);
3494 tcg_temp_free_i32(t3);
3495 set_cc_static(s);
3496 return DISAS_NEXT;
3499 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3501 int r3 = get_field(s, r3);
3502 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3503 set_cc_static(s);
3504 return DISAS_NEXT;
3507 #ifndef CONFIG_USER_ONLY
3508 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3510 int r1 = get_field(s, l1);
3511 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3512 set_cc_static(s);
3513 return DISAS_NEXT;
3516 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3518 int r1 = get_field(s, l1);
3519 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3520 set_cc_static(s);
3521 return DISAS_NEXT;
3523 #endif
3525 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3527 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3528 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3529 tcg_temp_free_i32(l);
3530 return DISAS_NEXT;
3533 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3535 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3536 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3537 tcg_temp_free_i32(l);
3538 return DISAS_NEXT;
3541 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3543 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3544 set_cc_static(s);
3545 return DISAS_NEXT;
3548 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3550 TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3551 TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3553 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3554 tcg_temp_free_i32(t1);
3555 tcg_temp_free_i32(t2);
3556 set_cc_static(s);
3557 return DISAS_NEXT;
3560 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3562 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3563 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3564 tcg_temp_free_i32(l);
3565 return DISAS_NEXT;
3568 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3570 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3571 return DISAS_NEXT;
3574 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3576 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3577 return DISAS_NEXT;
3580 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3582 tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3583 return DISAS_NEXT;
3586 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3588 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3589 return DISAS_NEXT;
3592 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3594 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3595 return DISAS_NEXT;
3598 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3600 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3601 return DISAS_NEXT;
3604 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3606 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3607 return_low128(o->out2);
3608 return DISAS_NEXT;
3611 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3613 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3614 return_low128(o->out2);
3615 return DISAS_NEXT;
3618 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3620 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3621 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3622 tcg_temp_free_i64(r3);
3623 return DISAS_NEXT;
3626 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3628 TCGv_i64 r3 = load_freg(get_field(s, r3));
3629 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3630 tcg_temp_free_i64(r3);
3631 return DISAS_NEXT;
3634 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3636 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3637 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3638 tcg_temp_free_i64(r3);
3639 return DISAS_NEXT;
3642 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3644 TCGv_i64 r3 = load_freg(get_field(s, r3));
3645 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3646 tcg_temp_free_i64(r3);
3647 return DISAS_NEXT;
3650 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3652 TCGv_i64 z, n;
3653 z = tcg_const_i64(0);
3654 n = tcg_temp_new_i64();
3655 tcg_gen_neg_i64(n, o->in2);
3656 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3657 tcg_temp_free_i64(n);
3658 tcg_temp_free_i64(z);
3659 return DISAS_NEXT;
3662 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3664 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3665 return DISAS_NEXT;
3668 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3670 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3671 return DISAS_NEXT;
3674 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3676 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3677 tcg_gen_mov_i64(o->out2, o->in2);
3678 return DISAS_NEXT;
3681 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3683 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3684 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3685 tcg_temp_free_i32(l);
3686 set_cc_static(s);
3687 return DISAS_NEXT;
3690 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3692 tcg_gen_neg_i64(o->out, o->in2);
3693 return DISAS_NEXT;
3696 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3698 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3699 return DISAS_NEXT;
3702 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3704 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3705 return DISAS_NEXT;
3708 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3710 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3711 tcg_gen_mov_i64(o->out2, o->in2);
3712 return DISAS_NEXT;
3715 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3717 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3718 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3719 tcg_temp_free_i32(l);
3720 set_cc_static(s);
3721 return DISAS_NEXT;
3724 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3726 tcg_gen_or_i64(o->out, o->in1, o->in2);
3727 return DISAS_NEXT;
3730 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3732 int shift = s->insn->data & 0xff;
3733 int size = s->insn->data >> 8;
3734 uint64_t mask = ((1ull << size) - 1) << shift;
3736 assert(!o->g_in2);
3737 tcg_gen_shli_i64(o->in2, o->in2, shift);
3738 tcg_gen_or_i64(o->out, o->in1, o->in2);
3740 /* Produce the CC from only the bits manipulated. */
3741 tcg_gen_andi_i64(cc_dst, o->out, mask);
3742 set_cc_nz_u64(s, cc_dst);
3743 return DISAS_NEXT;
3746 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3748 o->in1 = tcg_temp_new_i64();
3750 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3751 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3752 } else {
3753 /* Perform the atomic operation in memory. */
3754 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3755 s->insn->data);
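/* Both paths leave the pre-operation memory value in o->in1: the
   atomic fetch_or, like the plain load above, returns the old
   contents. */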
3758 /* Recompute also for atomic case: needed for setting CC. */
3759 tcg_gen_or_i64(o->out, o->in1, o->in2);
3761 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3762 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3764 return DISAS_NEXT;
3767 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3769 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3770 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3771 tcg_temp_free_i32(l);
3772 return DISAS_NEXT;
3775 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3777 int l2 = get_field(s, l2) + 1;
3778 TCGv_i32 l;
3780 /* The length must not exceed 32 bytes. */
3781 if (l2 > 32) {
3782 gen_program_exception(s, PGM_SPECIFICATION);
3783 return DISAS_NORETURN;
3785 l = tcg_const_i32(l2);
3786 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3787 tcg_temp_free_i32(l);
3788 return DISAS_NEXT;
3791 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3793 int l2 = get_field(s, l2) + 1;
3794 TCGv_i32 l;
3796 /* The length must be even and must not exceed 64 bytes. */
3797 if ((l2 & 1) || (l2 > 64)) {
3798 gen_program_exception(s, PGM_SPECIFICATION);
3799 return DISAS_NORETURN;
3801 l = tcg_const_i32(l2);
3802 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3803 tcg_temp_free_i32(l);
3804 return DISAS_NEXT;
3807 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3809 gen_helper_popcnt(o->out, o->in2);
3810 return DISAS_NEXT;
3813 #ifndef CONFIG_USER_ONLY
3814 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3816 gen_helper_ptlb(cpu_env);
3817 return DISAS_NEXT;
3819 #endif
3821 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3823 int i3 = get_field(s, i3);
3824 int i4 = get_field(s, i4);
3825 int i5 = get_field(s, i5);
3826 int do_zero = i4 & 0x80;
3827 uint64_t mask, imask, pmask;
3828 int pos, len, rot;
3830 /* Adjust the arguments for the specific insn. */
3831 switch (s->fields.op2) {
3832 case 0x55: /* risbg */
3833 case 0x59: /* risbgn */
3834 i3 &= 63;
3835 i4 &= 63;
3836 pmask = ~0;
3837 break;
3838 case 0x5d: /* risbhg */
3839 i3 &= 31;
3840 i4 &= 31;
3841 pmask = 0xffffffff00000000ull;
3842 break;
3843 case 0x51: /* risblg */
3844 i3 &= 31;
3845 i4 &= 31;
3846 pmask = 0x00000000ffffffffull;
3847 break;
3848 default:
3849 g_assert_not_reached();
3852 /* MASK is the set of bits to be inserted from R2.
3853 Take care for I3/I4 wraparound. */
3854 mask = pmask >> i3;
3855 if (i3 <= i4) {
3856 mask ^= pmask >> i4 >> 1;
3857 } else {
3858 mask |= ~(pmask >> i4 >> 1);
3860 mask &= pmask;
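/* Worked example for risbg: i3 = 8, i4 = 15 selects bits 8-15 in PoO
   numbering, i.e. mask = 0x00ff000000000000; the wraparound case
   i3 = 60, i4 = 3 yields mask = 0xf00000000000000f. */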
3862 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3863 insns, we need to keep the other half of the register. */
3864 imask = ~mask | ~pmask;
3865 if (do_zero) {
3866 imask = ~pmask;
3869 len = i4 - i3 + 1;
3870 pos = 63 - i4;
3871 rot = i5 & 63;
3872 if (s->fields.op2 == 0x5d) {
3873 pos += 32;
3876 /* In some cases we can implement this with extract. */
3877 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3878 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3879 return DISAS_NEXT;
3882 /* In some cases we can implement this with deposit. */
3883 if (len > 0 && (imask == 0 || ~mask == imask)) {
3884 /* Note that we rotate the bits to be inserted to the lsb, not to
3885 the position as described in the PoO. */
3886 rot = (rot - pos) & 63;
3887 } else {
3888 pos = -1;
3891 /* Rotate the input as necessary. */
3892 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3894 /* Insert the selected bits into the output. */
3895 if (pos >= 0) {
3896 if (imask == 0) {
3897 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3898 } else {
3899 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3901 } else if (imask == 0) {
3902 tcg_gen_andi_i64(o->out, o->in2, mask);
3903 } else {
3904 tcg_gen_andi_i64(o->in2, o->in2, mask);
3905 tcg_gen_andi_i64(o->out, o->out, imask);
3906 tcg_gen_or_i64(o->out, o->out, o->in2);
3908 return DISAS_NEXT;
3911 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3913 int i3 = get_field(s, i3);
3914 int i4 = get_field(s, i4);
3915 int i5 = get_field(s, i5);
3916 uint64_t mask;
3918 /* If this is a test-only form, arrange to discard the result. */
3919 if (i3 & 0x80) {
3920 o->out = tcg_temp_new_i64();
3921 o->g_out = false;
3924 i3 &= 63;
3925 i4 &= 63;
3926 i5 &= 63;
3928 /* MASK is the set of bits to be operated on from R2.
3929 Take care for I3/I4 wraparound. */
3930 mask = ~0ull >> i3;
3931 if (i3 <= i4) {
3932 mask ^= ~0ull >> i4 >> 1;
3933 } else {
3934 mask |= ~(~0ull >> i4 >> 1);
3937 /* Rotate the input as necessary. */
3938 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3940 /* Operate. */
3941 switch (s->fields.op2) {
3942 case 0x54: /* AND */
3943 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3944 tcg_gen_and_i64(o->out, o->out, o->in2);
3945 break;
3946 case 0x56: /* OR */
3947 tcg_gen_andi_i64(o->in2, o->in2, mask);
3948 tcg_gen_or_i64(o->out, o->out, o->in2);
3949 break;
3950 case 0x57: /* XOR */
3951 tcg_gen_andi_i64(o->in2, o->in2, mask);
3952 tcg_gen_xor_i64(o->out, o->out, o->in2);
3953 break;
3954 default:
3955 abort();
3958 /* Set the CC. */
3959 tcg_gen_andi_i64(cc_dst, o->out, mask);
3960 set_cc_nz_u64(s, cc_dst);
3961 return DISAS_NEXT;
3964 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3966 tcg_gen_bswap16_i64(o->out, o->in2);
3967 return DISAS_NEXT;
3970 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3972 tcg_gen_bswap32_i64(o->out, o->in2);
3973 return DISAS_NEXT;
3976 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3978 tcg_gen_bswap64_i64(o->out, o->in2);
3979 return DISAS_NEXT;
3982 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3984 TCGv_i32 t1 = tcg_temp_new_i32();
3985 TCGv_i32 t2 = tcg_temp_new_i32();
3986 TCGv_i32 to = tcg_temp_new_i32();
3987 tcg_gen_extrl_i64_i32(t1, o->in1);
3988 tcg_gen_extrl_i64_i32(t2, o->in2);
3989 tcg_gen_rotl_i32(to, t1, t2);
3990 tcg_gen_extu_i32_i64(o->out, to);
3991 tcg_temp_free_i32(t1);
3992 tcg_temp_free_i32(t2);
3993 tcg_temp_free_i32(to);
3994 return DISAS_NEXT;
3997 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3999 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
4000 return DISAS_NEXT;
4003 #ifndef CONFIG_USER_ONLY
4004 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
4006 gen_helper_rrbe(cc_op, cpu_env, o->in2);
4007 set_cc_static(s);
4008 return DISAS_NEXT;
4011 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
4013 gen_helper_sacf(cpu_env, o->in2);
4014 /* Addressing mode has changed, so end the block. */
4015 return DISAS_PC_STALE;
4017 #endif
4019 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
4021 int sam = s->insn->data;
4022 TCGv_i64 tsam;
4023 uint64_t mask;
4025 switch (sam) {
4026 case 0:
4027 mask = 0xffffff;
4028 break;
4029 case 1:
4030 mask = 0x7fffffff;
4031 break;
4032 default:
4033 mask = -1;
4034 break;
4037 /* Bizarre but true, we check the address of the current insn for the
4038 specification exception, not the next to be executed. Thus the PoO
4039 documents that Bad Things Happen two bytes before the end. */
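/* E.g. SAM24 (sam == 0) raises the exception when the current
   instruction itself sits above 0xffffff; otherwise the new mode is
   deposited into PSW bits 31-32 below. */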
4040 if (s->base.pc_next & ~mask) {
4041 gen_program_exception(s, PGM_SPECIFICATION);
4042 return DISAS_NORETURN;
4044 s->pc_tmp &= mask;
4046 tsam = tcg_const_i64(sam);
4047 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
4048 tcg_temp_free_i64(tsam);
4050 /* Always exit the TB, since we (may have) changed execution mode. */
4051 return DISAS_PC_STALE;
4054 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
4056 int r1 = get_field(s, r1);
4057 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
4058 return DISAS_NEXT;
4061 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4063 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4064 return DISAS_NEXT;
4067 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4069 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4070 return DISAS_NEXT;
4073 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4075 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4076 return_low128(o->out2);
4077 return DISAS_NEXT;
4080 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4082 gen_helper_sqeb(o->out, cpu_env, o->in2);
4083 return DISAS_NEXT;
4086 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4088 gen_helper_sqdb(o->out, cpu_env, o->in2);
4089 return DISAS_NEXT;
4092 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4094 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4095 return_low128(o->out2);
4096 return DISAS_NEXT;
4099 #ifndef CONFIG_USER_ONLY
4100 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4102 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4103 set_cc_static(s);
4104 return DISAS_NEXT;
4107 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4109 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4110 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4111 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4112 set_cc_static(s);
4113 tcg_temp_free_i32(r1);
4114 tcg_temp_free_i32(r3);
4115 return DISAS_NEXT;
4117 #endif
4119 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4121 DisasCompare c;
4122 TCGv_i64 a, h;
4123 TCGLabel *lab;
4124 int r1;
4126 disas_jcc(s, &c, get_field(s, m3));
4128 /* We want to store when the condition is fulfilled, so branch
4129 out when it's not. */
4130 c.cond = tcg_invert_cond(c.cond);
4132 lab = gen_new_label();
4133 if (c.is_64) {
4134 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4135 } else {
4136 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4138 free_compare(&c);
4140 r1 = get_field(s, r1);
4141 a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4142 switch (s->insn->data) {
4143 case 1: /* STOCG */
4144 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4145 break;
4146 case 0: /* STOC */
4147 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4148 break;
4149 case 2: /* STOCFH */
4150 h = tcg_temp_new_i64();
4151 tcg_gen_shri_i64(h, regs[r1], 32);
4152 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4153 tcg_temp_free_i64(h);
4154 break;
4155 default:
4156 g_assert_not_reached();
4158 tcg_temp_free_i64(a);
4160 gen_set_label(lab);
4161 return DISAS_NEXT;
4164 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4166 uint64_t sign = 1ull << s->insn->data;
4167 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4168 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4169 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4170 /* The arithmetic left shift is curious in that it does not affect
4171 the sign bit. Copy that over from the source unchanged. */
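/* E.g. a 32-bit SLA of 0x80000001 by 1 yields 0x80000002: bit 31 is
   taken from the source, and CC_OP_SLA_32 spots the lost zero bit as
   an overflow (CC = 3). */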
4172 tcg_gen_andi_i64(o->out, o->out, ~sign);
4173 tcg_gen_andi_i64(o->in1, o->in1, sign);
4174 tcg_gen_or_i64(o->out, o->out, o->in1);
4175 return DISAS_NEXT;
4178 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4180 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4181 return DISAS_NEXT;
4184 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4186 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4187 return DISAS_NEXT;
4190 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4192 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4193 return DISAS_NEXT;
4196 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4198 gen_helper_sfpc(cpu_env, o->in2);
4199 return DISAS_NEXT;
4202 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4204 gen_helper_sfas(cpu_env, o->in2);
4205 return DISAS_NEXT;
4208 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4210 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4211 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4212 gen_helper_srnm(cpu_env, o->addr1);
4213 return DISAS_NEXT;
4216 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4218 /* Bits 0-55 are ignored. */
4219 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4220 gen_helper_srnm(cpu_env, o->addr1);
4221 return DISAS_NEXT;
4224 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4226 TCGv_i64 tmp = tcg_temp_new_i64();
4228 /* Bits other than 61-63 are ignored. */
4229 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4231 /* No need to call a helper; we don't implement DFP. */
4232 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4233 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4234 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4236 tcg_temp_free_i64(tmp);
4237 return DISAS_NEXT;
4240 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4242 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4243 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4244 set_cc_static(s);
4246 tcg_gen_shri_i64(o->in1, o->in1, 24);
4247 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4248 return DISAS_NEXT;
4251 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4253 int b1 = get_field(s, b1);
4254 int d1 = get_field(s, d1);
4255 int b2 = get_field(s, b2);
4256 int d2 = get_field(s, d2);
4257 int r3 = get_field(s, r3);
4258 TCGv_i64 tmp = tcg_temp_new_i64();
4260 /* fetch all operands first */
4261 o->in1 = tcg_temp_new_i64();
4262 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4263 o->in2 = tcg_temp_new_i64();
4264 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4265 o->addr1 = get_address(s, 0, r3, 0);
4267 /* load the third operand into r3 before modifying anything */
4268 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4270 /* subtract CPU timer from first operand and store in GR0 */
4271 gen_helper_stpt(tmp, cpu_env);
4272 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4274 /* store second operand in GR1 */
4275 tcg_gen_mov_i64(regs[1], o->in2);
4277 tcg_temp_free_i64(tmp);
4278 return DISAS_NEXT;
4281 #ifndef CONFIG_USER_ONLY
4282 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4284 tcg_gen_shri_i64(o->in2, o->in2, 4);
4285 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4286 return DISAS_NEXT;
4289 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4291 gen_helper_sske(cpu_env, o->in1, o->in2);
4292 return DISAS_NEXT;
4295 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4297 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4298 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4299 return DISAS_PC_STALE_NOCHAIN;
4302 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4304 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4305 return DISAS_NEXT;
4307 #endif
4309 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4311 gen_helper_stck(o->out, cpu_env);
4312 /* ??? We don't implement clock states. */
4313 gen_op_movi_cc(s, 0);
4314 return DISAS_NEXT;
4317 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4319 TCGv_i64 c1 = tcg_temp_new_i64();
4320 TCGv_i64 c2 = tcg_temp_new_i64();
4321 TCGv_i64 todpr = tcg_temp_new_i64();
4322 gen_helper_stck(c1, cpu_env);
4323 /* The 16-bit value is stored in a uint32_t (only the valid bits are set). */
4324 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4325 /* Shift the 64-bit value into its place as a zero-extended
4326 104-bit value. Note that "bit positions 64-103 are always
4327 non-zero so that they compare differently to STCK"; we set
4328 the least significant bit to 1. */
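/* Resulting 16-byte operand, roughly: one zero epoch byte, the 64-bit
   clock, a forced-to-one bit among the trailing zeros, and the 16-bit
   TOD programmable field in the last two bytes. */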
4329 tcg_gen_shli_i64(c2, c1, 56);
4330 tcg_gen_shri_i64(c1, c1, 8);
4331 tcg_gen_ori_i64(c2, c2, 0x10000);
4332 tcg_gen_or_i64(c2, c2, todpr);
4333 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4334 tcg_gen_addi_i64(o->in2, o->in2, 8);
4335 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4336 tcg_temp_free_i64(c1);
4337 tcg_temp_free_i64(c2);
4338 tcg_temp_free_i64(todpr);
4339 /* ??? We don't implement clock states. */
4340 gen_op_movi_cc(s, 0);
4341 return DISAS_NEXT;
4344 #ifndef CONFIG_USER_ONLY
4345 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4347 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4348 gen_helper_sck(cc_op, cpu_env, o->in1);
4349 set_cc_static(s);
4350 return DISAS_NEXT;
4353 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4355 gen_helper_sckc(cpu_env, o->in2);
4356 return DISAS_NEXT;
4359 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4361 gen_helper_sckpf(cpu_env, regs[0]);
4362 return DISAS_NEXT;
4365 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4367 gen_helper_stckc(o->out, cpu_env);
4368 return DISAS_NEXT;
4371 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4373 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4374 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4375 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4376 tcg_temp_free_i32(r1);
4377 tcg_temp_free_i32(r3);
4378 return DISAS_NEXT;
4381 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4383 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4384 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4385 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4386 tcg_temp_free_i32(r1);
4387 tcg_temp_free_i32(r3);
4388 return DISAS_NEXT;
4391 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4393 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4394 return DISAS_NEXT;
4397 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4399 gen_helper_spt(cpu_env, o->in2);
4400 return DISAS_NEXT;
4403 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4405 gen_helper_stfl(cpu_env);
4406 return DISAS_NEXT;
4409 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4411 gen_helper_stpt(o->out, cpu_env);
4412 return DISAS_NEXT;
4415 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4417 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4418 set_cc_static(s);
4419 return DISAS_NEXT;
4422 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4424 gen_helper_spx(cpu_env, o->in2);
4425 return DISAS_NEXT;
4428 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4430 gen_helper_xsch(cpu_env, regs[1]);
4431 set_cc_static(s);
4432 return DISAS_NEXT;
4435 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4437 gen_helper_csch(cpu_env, regs[1]);
4438 set_cc_static(s);
4439 return DISAS_NEXT;
4442 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4444 gen_helper_hsch(cpu_env, regs[1]);
4445 set_cc_static(s);
4446 return DISAS_NEXT;
4449 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4451 gen_helper_msch(cpu_env, regs[1], o->in2);
4452 set_cc_static(s);
4453 return DISAS_NEXT;
4456 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4458 gen_helper_rchp(cpu_env, regs[1]);
4459 set_cc_static(s);
4460 return DISAS_NEXT;
4463 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4465 gen_helper_rsch(cpu_env, regs[1]);
4466 set_cc_static(s);
4467 return DISAS_NEXT;
4470 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4472 gen_helper_sal(cpu_env, regs[1]);
4473 return DISAS_NEXT;
4476 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4478 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4479 return DISAS_NEXT;
4482 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4484 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4485 gen_op_movi_cc(s, 3);
4486 return DISAS_NEXT;
4489 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4491 /* The instruction is suppressed if not provided. */
4492 return DISAS_NEXT;
4495 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4497 gen_helper_ssch(cpu_env, regs[1], o->in2);
4498 set_cc_static(s);
4499 return DISAS_NEXT;
4502 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4504 gen_helper_stsch(cpu_env, regs[1], o->in2);
4505 set_cc_static(s);
4506 return DISAS_NEXT;
4509 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4511 gen_helper_stcrw(cpu_env, o->in2);
4512 set_cc_static(s);
4513 return DISAS_NEXT;
4516 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4518 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4519 set_cc_static(s);
4520 return DISAS_NEXT;
4523 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4525 gen_helper_tsch(cpu_env, regs[1], o->in2);
4526 set_cc_static(s);
4527 return DISAS_NEXT;
4530 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4532 gen_helper_chsc(cpu_env, o->in2);
4533 set_cc_static(s);
4534 return DISAS_NEXT;
4537 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4539 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4540 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4541 return DISAS_NEXT;
4544 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4546 uint64_t i2 = get_field(s, i2);
4547 TCGv_i64 t;
4549     /* It is important to do what the instruction name says: STORE THEN.
4550        If we let the output hook perform the store, a fault and restart
4551        would leave the wrong SYSTEM MASK in place. */
4552 t = tcg_temp_new_i64();
4553 tcg_gen_shri_i64(t, psw_mask, 56);
4554 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4555 tcg_temp_free_i64(t);
4557 if (s->fields.op == 0xac) {
4558 tcg_gen_andi_i64(psw_mask, psw_mask,
4559 (i2 << 56) | 0x00ffffffffffffffull);
4560 } else {
4561 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4564 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4565 return DISAS_PC_STALE_NOCHAIN;
4568 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4570 o->addr1 = get_address(s, 0, get_field(s, r2), 0);
4571 tcg_gen_qemu_st_tl(o->in1, o->addr1, MMU_REAL_IDX, s->insn->data);
4573 if (s->base.tb->flags & FLAG_MASK_PER) {
4574 update_psw_addr(s);
4575 gen_helper_per_store_real(cpu_env);
4577 return DISAS_NEXT;
4579 #endif
4581 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4583 gen_helper_stfle(cc_op, cpu_env, o->in2);
4584 set_cc_static(s);
4585 return DISAS_NEXT;
4588 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4590 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4591 return DISAS_NEXT;
4594 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4596 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4597 return DISAS_NEXT;
4600 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4602 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4603 return DISAS_NEXT;
4606 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4608 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4609 return DISAS_NEXT;
4612 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4614 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4615 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4616 gen_helper_stam(cpu_env, r1, o->in2, r3);
4617 tcg_temp_free_i32(r1);
4618 tcg_temp_free_i32(r3);
4619 return DISAS_NEXT;
4622 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4624 int m3 = get_field(s, m3);
4625 int pos, base = s->insn->data;
4626 TCGv_i64 tmp = tcg_temp_new_i64();
4628 pos = base + ctz32(m3) * 8;
4629 switch (m3) {
4630 case 0xf:
4631 /* Effectively a 32-bit store. */
4632 tcg_gen_shri_i64(tmp, o->in1, pos);
4633 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4634 break;
4636 case 0xc:
4637 case 0x6:
4638 case 0x3:
4639 /* Effectively a 16-bit store. */
4640 tcg_gen_shri_i64(tmp, o->in1, pos);
4641 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4642 break;
4644 case 0x8:
4645 case 0x4:
4646 case 0x2:
4647 case 0x1:
4648 /* Effectively an 8-bit store. */
4649 tcg_gen_shri_i64(tmp, o->in1, pos);
4650 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4651 break;
4653 default:
4654 /* This is going to be a sequence of shifts and stores. */
4655 pos = base + 32 - 8;
4656 while (m3) {
4657 if (m3 & 0x8) {
4658 tcg_gen_shri_i64(tmp, o->in1, pos);
4659 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4660 tcg_gen_addi_i64(o->in2, o->in2, 1);
4662 m3 = (m3 << 1) & 0xf;
4663 pos -= 8;
4665 break;
4667 tcg_temp_free_i64(tmp);
4668 return DISAS_NEXT;
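/*
 * Illustrative host-side model (unused sketch) of the general STCM case
 * above: each set bit in the 4-bit mask, scanned left to right, stores one
 * byte of the source word at the next sequential address.  "base" stands
 * for s->insn->data: 32 for the high-word variant, else 0.
 */
static inline int stcm_general_sketch(uint64_t r1_val, int m3, int base,
                                      uint8_t *out)
{
    int pos = base + 32 - 8;
    int n = 0;

    while (m3) {
        if (m3 & 0x8) {
            out[n++] = r1_val >> pos;   /* low byte after the shift */
        }
        m3 = (m3 << 1) & 0xf;
        pos -= 8;
    }
    return n;   /* number of bytes stored */
}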
4671 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4673 int r1 = get_field(s, r1);
4674 int r3 = get_field(s, r3);
4675 int size = s->insn->data;
4676 TCGv_i64 tsize = tcg_const_i64(size);
4678 while (1) {
4679 if (size == 8) {
4680 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4681 } else {
4682 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4684 if (r1 == r3) {
4685 break;
4687 tcg_gen_add_i64(o->in2, o->in2, tsize);
4688 r1 = (r1 + 1) & 15;
4691 tcg_temp_free_i64(tsize);
4692 return DISAS_NEXT;
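/*
 * Illustrative sketch (unused): the r1..r3 register range of STM/STMG
 * wraps modulo 16 exactly like the loop above, so e.g. r1=14, r3=1
 * stores registers 14, 15, 0, 1.
 */
static inline int stm_reg_count_sketch(int r1, int r3)
{
    return ((r3 - r1) & 15) + 1;
}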
4695 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4697 int r1 = get_field(s, r1);
4698 int r3 = get_field(s, r3);
4699 TCGv_i64 t = tcg_temp_new_i64();
4700 TCGv_i64 t4 = tcg_const_i64(4);
4701 TCGv_i64 t32 = tcg_const_i64(32);
4703 while (1) {
4704 tcg_gen_shl_i64(t, regs[r1], t32);
4705 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4706 if (r1 == r3) {
4707 break;
4709 tcg_gen_add_i64(o->in2, o->in2, t4);
4710 r1 = (r1 + 1) & 15;
4713 tcg_temp_free_i64(t);
4714 tcg_temp_free_i64(t4);
4715 tcg_temp_free_i64(t32);
4716 return DISAS_NEXT;
4719 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4721 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4722 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4723 } else if (HAVE_ATOMIC128) {
4724 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4725 } else {
4726 gen_helper_exit_atomic(cpu_env);
4727 return DISAS_NORETURN;
4729 return DISAS_NEXT;
4732 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4734 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4735 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4737 gen_helper_srst(cpu_env, r1, r2);
4739 tcg_temp_free_i32(r1);
4740 tcg_temp_free_i32(r2);
4741 set_cc_static(s);
4742 return DISAS_NEXT;
4745 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4747 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4748 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4750 gen_helper_srstu(cpu_env, r1, r2);
4752 tcg_temp_free_i32(r1);
4753 tcg_temp_free_i32(r2);
4754 set_cc_static(s);
4755 return DISAS_NEXT;
4758 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4760 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4761 return DISAS_NEXT;
4764 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4766 DisasCompare cmp;
4767 TCGv_i64 borrow;
4769 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4771 /* The !borrow flag is the msb of CC. Since we want the inverse of
4772 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4773 disas_jcc(s, &cmp, 8 | 4);
4774 borrow = tcg_temp_new_i64();
4775 if (cmp.is_64) {
4776 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4777 } else {
4778 TCGv_i32 t = tcg_temp_new_i32();
4779 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4780 tcg_gen_extu_i32_i64(borrow, t);
4781 tcg_temp_free_i32(t);
4783 free_compare(&cmp);
4785 tcg_gen_sub_i64(o->out, o->out, borrow);
4786 tcg_temp_free_i64(borrow);
4787 return DISAS_NEXT;
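/*
 * Illustrative host-side sketch of the borrow handling above: for
 * SUBTRACT LOGICAL the msb of the condition code means "no borrow", so
 * the CC=0|CC=1 test (mask 8|4) is true exactly when a borrow occurred
 * and one extra unit must be subtracted.
 */
static inline uint64_t subb_sketch(uint64_t in1, uint64_t in2, int cc)
{
    int borrow = (cc & 2) == 0;    /* msb of CC clear -> borrow */
    return in1 - in2 - borrow;
}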
4790 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4792 TCGv_i32 t;
4794 update_psw_addr(s);
4795 update_cc_op(s);
4797 t = tcg_const_i32(get_field(s, i1) & 0xff);
4798 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4799 tcg_temp_free_i32(t);
4801 t = tcg_const_i32(s->ilen);
4802 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4803 tcg_temp_free_i32(t);
4805 gen_exception(EXCP_SVC);
4806 return DISAS_NORETURN;
4809 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4811 int cc = 0;
4813 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4814 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4815 gen_op_movi_cc(s, cc);
4816 return DISAS_NEXT;
4819 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4821 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4822 set_cc_static(s);
4823 return DISAS_NEXT;
4826 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4828 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4829 set_cc_static(s);
4830 return DISAS_NEXT;
4833 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4835 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4836 set_cc_static(s);
4837 return DISAS_NEXT;
4840 #ifndef CONFIG_USER_ONLY
4842 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4844 gen_helper_testblock(cc_op, cpu_env, o->in2);
4845 set_cc_static(s);
4846 return DISAS_NEXT;
4849 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4851 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4852 set_cc_static(s);
4853 return DISAS_NEXT;
4856 #endif
4858 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4860 TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
4861 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4862 tcg_temp_free_i32(l1);
4863 set_cc_static(s);
4864 return DISAS_NEXT;
4867 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4869 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4870 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4871 tcg_temp_free_i32(l);
4872 set_cc_static(s);
4873 return DISAS_NEXT;
4876 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4878 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4879 return_low128(o->out2);
4880 set_cc_static(s);
4881 return DISAS_NEXT;
4884 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4886 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4887 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4888 tcg_temp_free_i32(l);
4889 set_cc_static(s);
4890 return DISAS_NEXT;
4893 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4895 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4896 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4897 tcg_temp_free_i32(l);
4898 set_cc_static(s);
4899 return DISAS_NEXT;
4902 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4904 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4905 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4906 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4907 TCGv_i32 tst = tcg_temp_new_i32();
4908 int m3 = get_field(s, m3);
4910 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4911 m3 = 0;
4913 if (m3 & 1) {
4914 tcg_gen_movi_i32(tst, -1);
4915 } else {
4916 tcg_gen_extrl_i64_i32(tst, regs[0]);
4917 if (s->insn->opc & 3) {
4918 tcg_gen_ext8u_i32(tst, tst);
4919 } else {
4920 tcg_gen_ext16u_i32(tst, tst);
4923 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4925 tcg_temp_free_i32(r1);
4926 tcg_temp_free_i32(r2);
4927 tcg_temp_free_i32(sizes);
4928 tcg_temp_free_i32(tst);
4929 set_cc_static(s);
4930 return DISAS_NEXT;
4933 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4935 TCGv_i32 t1 = tcg_const_i32(0xff);
4936 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4937 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4938 tcg_temp_free_i32(t1);
4939 set_cc_static(s);
4940 return DISAS_NEXT;
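/*
 * Illustrative host-side sketch (unused, assumes a GCC/Clang builtin) of
 * TEST AND SET above: the byte is atomically replaced by 0xff and CC
 * becomes the old byte's leftmost bit (0 = lock was free, 1 = held).
 */
static inline int ts_sketch(uint8_t *byte)
{
    uint8_t old = __atomic_exchange_n(byte, 0xff, __ATOMIC_SEQ_CST);
    return old >> 7;
}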
4943 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4945 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4946 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4947 tcg_temp_free_i32(l);
4948 return DISAS_NEXT;
4951 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4953 int l1 = get_field(s, l1) + 1;
4954 TCGv_i32 l;
4956 /* The length must not exceed 32 bytes. */
4957 if (l1 > 32) {
4958 gen_program_exception(s, PGM_SPECIFICATION);
4959 return DISAS_NORETURN;
4961 l = tcg_const_i32(l1);
4962 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4963 tcg_temp_free_i32(l);
4964 set_cc_static(s);
4965 return DISAS_NEXT;
4968 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4970 int l1 = get_field(s, l1) + 1;
4971 TCGv_i32 l;
4973     /* The length must be even and must not exceed 64 bytes. */
4974 if ((l1 & 1) || (l1 > 64)) {
4975 gen_program_exception(s, PGM_SPECIFICATION);
4976 return DISAS_NORETURN;
4978 l = tcg_const_i32(l1);
4979 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4980 tcg_temp_free_i32(l);
4981 set_cc_static(s);
4982 return DISAS_NEXT;
4986 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4988 int d1 = get_field(s, d1);
4989 int d2 = get_field(s, d2);
4990 int b1 = get_field(s, b1);
4991 int b2 = get_field(s, b2);
4992 int l = get_field(s, l1);
4993 TCGv_i32 t32;
4995 o->addr1 = get_address(s, 0, b1, d1);
4997 /* If the addresses are identical, this is a store/memset of zero. */
4998 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4999 o->in2 = tcg_const_i64(0);
5001 l++;
5002 while (l >= 8) {
5003 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
5004 l -= 8;
5005 if (l > 0) {
5006 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
5009 if (l >= 4) {
5010 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
5011 l -= 4;
5012 if (l > 0) {
5013 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
5016 if (l >= 2) {
5017 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
5018 l -= 2;
5019 if (l > 0) {
5020 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
5023 if (l) {
5024 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
5026 gen_op_movi_cc(s, 0);
5027 return DISAS_NEXT;
5030 /* But in general we'll defer to a helper. */
5031 o->in2 = get_address(s, 0, b2, d2);
5032 t32 = tcg_const_i32(l);
5033 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
5034 tcg_temp_free_i32(t32);
5035 set_cc_static(s);
5036 return DISAS_NEXT;
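/*
 * Illustrative sketch (unused) of the fast path above: XC of a field
 * with itself yields all-zero bytes, so it can be emitted as plain
 * stores instead of calling the helper.
 */
static inline void xc_same_operand_sketch(uint8_t *dst, int len)
{
    for (int i = 0; i < len; i++) {
        dst[i] ^= dst[i];   /* always 0 -- hence the memset-style stores */
    }
}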
5039 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
5041 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5042 return DISAS_NEXT;
5045 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
5047 int shift = s->insn->data & 0xff;
5048 int size = s->insn->data >> 8;
5049 uint64_t mask = ((1ull << size) - 1) << shift;
5051 assert(!o->g_in2);
5052 tcg_gen_shli_i64(o->in2, o->in2, shift);
5053 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5055 /* Produce the CC from only the bits manipulated. */
5056 tcg_gen_andi_i64(cc_dst, o->out, mask);
5057 set_cc_nz_u64(s, cc_dst);
5058 return DISAS_NEXT;
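/*
 * Illustrative sketch (unused): the immediate-XOR variants pack the
 * field size and shift into s->insn->data, and the CC above is derived
 * only from the mask of bits actually manipulated.
 */
static inline uint64_t xori_mask_sketch(int size, int shift)
{
    return ((1ull << size) - 1) << shift;
}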
5061 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5063 o->in1 = tcg_temp_new_i64();
5065 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5066 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5067 } else {
5068 /* Perform the atomic operation in memory. */
5069 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5070 s->insn->data);
5073 /* Recompute also for atomic case: needed for setting CC. */
5074 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5076 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5077 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5079 return DISAS_NEXT;
5082 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5084 o->out = tcg_const_i64(0);
5085 return DISAS_NEXT;
5088 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5090 o->out = tcg_const_i64(0);
5091 o->out2 = o->out;
5092 o->g_out2 = true;
5093 return DISAS_NEXT;
5096 #ifndef CONFIG_USER_ONLY
5097 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5099 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5101 gen_helper_clp(cpu_env, r2);
5102 tcg_temp_free_i32(r2);
5103 set_cc_static(s);
5104 return DISAS_NEXT;
5107 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5109 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5110 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5112 gen_helper_pcilg(cpu_env, r1, r2);
5113 tcg_temp_free_i32(r1);
5114 tcg_temp_free_i32(r2);
5115 set_cc_static(s);
5116 return DISAS_NEXT;
5119 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5121 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5122 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5124 gen_helper_pcistg(cpu_env, r1, r2);
5125 tcg_temp_free_i32(r1);
5126 tcg_temp_free_i32(r2);
5127 set_cc_static(s);
5128 return DISAS_NEXT;
5131 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5133 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5134 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5136 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5137 tcg_temp_free_i32(ar);
5138 tcg_temp_free_i32(r1);
5139 set_cc_static(s);
5140 return DISAS_NEXT;
5143 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5145 gen_helper_sic(cpu_env, o->in1, o->in2);
5146 return DISAS_NEXT;
5149 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5151 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5152 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5154 gen_helper_rpcit(cpu_env, r1, r2);
5155 tcg_temp_free_i32(r1);
5156 tcg_temp_free_i32(r2);
5157 set_cc_static(s);
5158 return DISAS_NEXT;
5161 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5163 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5164 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
5165 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5167 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5168 tcg_temp_free_i32(ar);
5169 tcg_temp_free_i32(r1);
5170 tcg_temp_free_i32(r3);
5171 set_cc_static(s);
5172 return DISAS_NEXT;
5175 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5177 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5178 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5180 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5181 tcg_temp_free_i32(ar);
5182 tcg_temp_free_i32(r1);
5183 set_cc_static(s);
5184 return DISAS_NEXT;
5186 #endif
5188 #include "translate_vx.c.inc"
5190 /* ====================================================================== */
5191 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5192 the original inputs), update the various cc data structures in order to
5193 be able to compute the new condition code. */
5195 static void cout_abs32(DisasContext *s, DisasOps *o)
5197 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5200 static void cout_abs64(DisasContext *s, DisasOps *o)
5202 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5205 static void cout_adds32(DisasContext *s, DisasOps *o)
5207 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5210 static void cout_adds64(DisasContext *s, DisasOps *o)
5212 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5215 static void cout_addu32(DisasContext *s, DisasOps *o)
5217 tcg_gen_shri_i64(cc_src, o->out, 32);
5218 tcg_gen_ext32u_i64(cc_dst, o->out);
5219 gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
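/*
 * Illustrative sketch (unused): a 32-bit logical add is performed in a
 * 64-bit temporary, so the carry is exactly bit 32 of the result -- the
 * shift above moves it into cc_src for CC_OP_ADDU.
 */
static inline uint32_t addu32_carry_sketch(uint32_t a, uint32_t b)
{
    uint64_t sum = (uint64_t)a + b;
    return sum >> 32;   /* 0 or 1 */
}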
5222 static void cout_addu64(DisasContext *s, DisasOps *o)
5224 gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5227 static void cout_cmps32(DisasContext *s, DisasOps *o)
5229 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5232 static void cout_cmps64(DisasContext *s, DisasOps *o)
5234 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5237 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5239 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5242 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5244 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5247 static void cout_f32(DisasContext *s, DisasOps *o)
5249 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5252 static void cout_f64(DisasContext *s, DisasOps *o)
5254 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5257 static void cout_f128(DisasContext *s, DisasOps *o)
5259 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5262 static void cout_nabs32(DisasContext *s, DisasOps *o)
5264 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5267 static void cout_nabs64(DisasContext *s, DisasOps *o)
5269 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5272 static void cout_neg32(DisasContext *s, DisasOps *o)
5274 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5277 static void cout_neg64(DisasContext *s, DisasOps *o)
5279 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5282 static void cout_nz32(DisasContext *s, DisasOps *o)
5284 tcg_gen_ext32u_i64(cc_dst, o->out);
5285 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5288 static void cout_nz64(DisasContext *s, DisasOps *o)
5290 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5293 static void cout_s32(DisasContext *s, DisasOps *o)
5295 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5298 static void cout_s64(DisasContext *s, DisasOps *o)
5300 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5303 static void cout_subs32(DisasContext *s, DisasOps *o)
5305 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5308 static void cout_subs64(DisasContext *s, DisasOps *o)
5310 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5313 static void cout_subu32(DisasContext *s, DisasOps *o)
5315 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5318 static void cout_subu64(DisasContext *s, DisasOps *o)
5320 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5323 static void cout_subb32(DisasContext *s, DisasOps *o)
5325 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5328 static void cout_subb64(DisasContext *s, DisasOps *o)
5330 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5333 static void cout_tm32(DisasContext *s, DisasOps *o)
5335 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5338 static void cout_tm64(DisasContext *s, DisasOps *o)
5340 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5343 static void cout_muls32(DisasContext *s, DisasOps *o)
5345 gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5348 static void cout_muls64(DisasContext *s, DisasOps *o)
5350     /* out contains "high" part, out2 contains "low" part of 128-bit result */
5351 gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5354 /* ====================================================================== */
5355 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5356 with the TCG register to which we will write. Used in combination with
5357 the "wout" generators, in some cases we need a new temporary, and in
5358 some cases we can write to a TCG global. */
5360 static void prep_new(DisasContext *s, DisasOps *o)
5362 o->out = tcg_temp_new_i64();
5364 #define SPEC_prep_new 0
5366 static void prep_new_P(DisasContext *s, DisasOps *o)
5368 o->out = tcg_temp_new_i64();
5369 o->out2 = tcg_temp_new_i64();
5371 #define SPEC_prep_new_P 0
5373 static void prep_r1(DisasContext *s, DisasOps *o)
5375 o->out = regs[get_field(s, r1)];
5376 o->g_out = true;
5378 #define SPEC_prep_r1 0
5380 static void prep_r1_P(DisasContext *s, DisasOps *o)
5382 int r1 = get_field(s, r1);
5383 o->out = regs[r1];
5384 o->out2 = regs[r1 + 1];
5385 o->g_out = o->g_out2 = true;
5387 #define SPEC_prep_r1_P SPEC_r1_even
5389 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5390 static void prep_x1(DisasContext *s, DisasOps *o)
5392 o->out = load_freg(get_field(s, r1));
5393 o->out2 = load_freg(get_field(s, r1) + 2);
5395 #define SPEC_prep_x1 SPEC_r1_f128
5397 /* ====================================================================== */
5398 /* The "Write OUTput" generators. These generally perform some non-trivial
5399 copy of data to TCG globals, or to main memory. The trivial cases are
5400 generally handled by having a "prep" generator install the TCG global
5401 as the destination of the operation. */
5403 static void wout_r1(DisasContext *s, DisasOps *o)
5405 store_reg(get_field(s, r1), o->out);
5407 #define SPEC_wout_r1 0
5409 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5411 store_reg(get_field(s, r1), o->out2);
5413 #define SPEC_wout_out2_r1 0
5415 static void wout_r1_8(DisasContext *s, DisasOps *o)
5417 int r1 = get_field(s, r1);
5418 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5420 #define SPEC_wout_r1_8 0
5422 static void wout_r1_16(DisasContext *s, DisasOps *o)
5424 int r1 = get_field(s, r1);
5425 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5427 #define SPEC_wout_r1_16 0
5429 static void wout_r1_32(DisasContext *s, DisasOps *o)
5431 store_reg32_i64(get_field(s, r1), o->out);
5433 #define SPEC_wout_r1_32 0
5435 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5437 store_reg32h_i64(get_field(s, r1), o->out);
5439 #define SPEC_wout_r1_32h 0
5441 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5443 int r1 = get_field(s, r1);
5444 store_reg32_i64(r1, o->out);
5445 store_reg32_i64(r1 + 1, o->out2);
5447 #define SPEC_wout_r1_P32 SPEC_r1_even
5449 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5451 int r1 = get_field(s, r1);
5452 store_reg32_i64(r1 + 1, o->out);
5453 tcg_gen_shri_i64(o->out, o->out, 32);
5454 store_reg32_i64(r1, o->out);
5456 #define SPEC_wout_r1_D32 SPEC_r1_even
5458 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5460 int r3 = get_field(s, r3);
5461 store_reg32_i64(r3, o->out);
5462 store_reg32_i64(r3 + 1, o->out2);
5464 #define SPEC_wout_r3_P32 SPEC_r3_even
5466 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5468 int r3 = get_field(s, r3);
5469 store_reg(r3, o->out);
5470 store_reg(r3 + 1, o->out2);
5472 #define SPEC_wout_r3_P64 SPEC_r3_even
5474 static void wout_e1(DisasContext *s, DisasOps *o)
5476 store_freg32_i64(get_field(s, r1), o->out);
5478 #define SPEC_wout_e1 0
5480 static void wout_f1(DisasContext *s, DisasOps *o)
5482 store_freg(get_field(s, r1), o->out);
5484 #define SPEC_wout_f1 0
5486 static void wout_x1(DisasContext *s, DisasOps *o)
5488 int f1 = get_field(s, r1);
5489 store_freg(f1, o->out);
5490 store_freg(f1 + 2, o->out2);
5492 #define SPEC_wout_x1 SPEC_r1_f128
5494 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5496 if (get_field(s, r1) != get_field(s, r2)) {
5497 store_reg32_i64(get_field(s, r1), o->out);
5500 #define SPEC_wout_cond_r1r2_32 0
5502 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5504 if (get_field(s, r1) != get_field(s, r2)) {
5505 store_freg32_i64(get_field(s, r1), o->out);
5508 #define SPEC_wout_cond_e1e2 0
5510 static void wout_m1_8(DisasContext *s, DisasOps *o)
5512 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5514 #define SPEC_wout_m1_8 0
5516 static void wout_m1_16(DisasContext *s, DisasOps *o)
5518 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5520 #define SPEC_wout_m1_16 0
5522 #ifndef CONFIG_USER_ONLY
5523 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5525 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5527 #define SPEC_wout_m1_16a 0
5528 #endif
5530 static void wout_m1_32(DisasContext *s, DisasOps *o)
5532 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5534 #define SPEC_wout_m1_32 0
5536 #ifndef CONFIG_USER_ONLY
5537 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5539 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5541 #define SPEC_wout_m1_32a 0
5542 #endif
5544 static void wout_m1_64(DisasContext *s, DisasOps *o)
5546 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5548 #define SPEC_wout_m1_64 0
5550 #ifndef CONFIG_USER_ONLY
5551 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5553 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5555 #define SPEC_wout_m1_64a 0
5556 #endif
5558 static void wout_m2_32(DisasContext *s, DisasOps *o)
5560 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5562 #define SPEC_wout_m2_32 0
5564 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5566 store_reg(get_field(s, r1), o->in2);
5568 #define SPEC_wout_in2_r1 0
5570 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5572 store_reg32_i64(get_field(s, r1), o->in2);
5574 #define SPEC_wout_in2_r1_32 0
5576 /* ====================================================================== */
5577 /* The "INput 1" generators. These load the first operand to an insn. */
5579 static void in1_r1(DisasContext *s, DisasOps *o)
5581 o->in1 = load_reg(get_field(s, r1));
5583 #define SPEC_in1_r1 0
5585 static void in1_r1_o(DisasContext *s, DisasOps *o)
5587 o->in1 = regs[get_field(s, r1)];
5588 o->g_in1 = true;
5590 #define SPEC_in1_r1_o 0
5592 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5594 o->in1 = tcg_temp_new_i64();
5595 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5597 #define SPEC_in1_r1_32s 0
5599 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5601 o->in1 = tcg_temp_new_i64();
5602 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5604 #define SPEC_in1_r1_32u 0
5606 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5608 o->in1 = tcg_temp_new_i64();
5609 tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5611 #define SPEC_in1_r1_sr32 0
5613 static void in1_r1p1(DisasContext *s, DisasOps *o)
5615 o->in1 = load_reg(get_field(s, r1) + 1);
5617 #define SPEC_in1_r1p1 SPEC_r1_even
5619 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5621 o->in1 = regs[get_field(s, r1) + 1];
5622 o->g_in1 = true;
5624 #define SPEC_in1_r1p1_o SPEC_r1_even
5626 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5628 o->in1 = tcg_temp_new_i64();
5629 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5631 #define SPEC_in1_r1p1_32s SPEC_r1_even
5633 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5635 o->in1 = tcg_temp_new_i64();
5636 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5638 #define SPEC_in1_r1p1_32u SPEC_r1_even
5640 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5642 int r1 = get_field(s, r1);
5643 o->in1 = tcg_temp_new_i64();
5644 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5646 #define SPEC_in1_r1_D32 SPEC_r1_even
5648 static void in1_r2(DisasContext *s, DisasOps *o)
5650 o->in1 = load_reg(get_field(s, r2));
5652 #define SPEC_in1_r2 0
5654 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5656 o->in1 = tcg_temp_new_i64();
5657 tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5659 #define SPEC_in1_r2_sr32 0
5661 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5663 o->in1 = tcg_temp_new_i64();
5664 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5666 #define SPEC_in1_r2_32u 0
5668 static void in1_r3(DisasContext *s, DisasOps *o)
5670 o->in1 = load_reg(get_field(s, r3));
5672 #define SPEC_in1_r3 0
5674 static void in1_r3_o(DisasContext *s, DisasOps *o)
5676 o->in1 = regs[get_field(s, r3)];
5677 o->g_in1 = true;
5679 #define SPEC_in1_r3_o 0
5681 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5683 o->in1 = tcg_temp_new_i64();
5684 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5686 #define SPEC_in1_r3_32s 0
5688 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5690 o->in1 = tcg_temp_new_i64();
5691 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5693 #define SPEC_in1_r3_32u 0
5695 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5697 int r3 = get_field(s, r3);
5698 o->in1 = tcg_temp_new_i64();
5699 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5701 #define SPEC_in1_r3_D32 SPEC_r3_even
5703 static void in1_e1(DisasContext *s, DisasOps *o)
5705 o->in1 = load_freg32_i64(get_field(s, r1));
5707 #define SPEC_in1_e1 0
5709 static void in1_f1(DisasContext *s, DisasOps *o)
5711 o->in1 = load_freg(get_field(s, r1));
5713 #define SPEC_in1_f1 0
5715 /* Load the high double word of an extended (128-bit) format FP number */
5716 static void in1_x2h(DisasContext *s, DisasOps *o)
5718 o->in1 = load_freg(get_field(s, r2));
5720 #define SPEC_in1_x2h SPEC_r2_f128
5722 static void in1_f3(DisasContext *s, DisasOps *o)
5724 o->in1 = load_freg(get_field(s, r3));
5726 #define SPEC_in1_f3 0
5728 static void in1_la1(DisasContext *s, DisasOps *o)
5730 o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5732 #define SPEC_in1_la1 0
5734 static void in1_la2(DisasContext *s, DisasOps *o)
5736 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5737 o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5739 #define SPEC_in1_la2 0
5741 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5743 in1_la1(s, o);
5744 o->in1 = tcg_temp_new_i64();
5745 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5747 #define SPEC_in1_m1_8u 0
5749 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5751 in1_la1(s, o);
5752 o->in1 = tcg_temp_new_i64();
5753 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5755 #define SPEC_in1_m1_16s 0
5757 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5759 in1_la1(s, o);
5760 o->in1 = tcg_temp_new_i64();
5761 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5763 #define SPEC_in1_m1_16u 0
5765 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5767 in1_la1(s, o);
5768 o->in1 = tcg_temp_new_i64();
5769 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5771 #define SPEC_in1_m1_32s 0
5773 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5775 in1_la1(s, o);
5776 o->in1 = tcg_temp_new_i64();
5777 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5779 #define SPEC_in1_m1_32u 0
5781 static void in1_m1_64(DisasContext *s, DisasOps *o)
5783 in1_la1(s, o);
5784 o->in1 = tcg_temp_new_i64();
5785 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5787 #define SPEC_in1_m1_64 0
5789 /* ====================================================================== */
5790 /* The "INput 2" generators. These load the second operand to an insn. */
5792 static void in2_r1_o(DisasContext *s, DisasOps *o)
5794 o->in2 = regs[get_field(s, r1)];
5795 o->g_in2 = true;
5797 #define SPEC_in2_r1_o 0
5799 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5801 o->in2 = tcg_temp_new_i64();
5802 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5804 #define SPEC_in2_r1_16u 0
5806 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5808 o->in2 = tcg_temp_new_i64();
5809 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5811 #define SPEC_in2_r1_32u 0
5813 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5815 int r1 = get_field(s, r1);
5816 o->in2 = tcg_temp_new_i64();
5817 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5819 #define SPEC_in2_r1_D32 SPEC_r1_even
5821 static void in2_r2(DisasContext *s, DisasOps *o)
5823 o->in2 = load_reg(get_field(s, r2));
5825 #define SPEC_in2_r2 0
5827 static void in2_r2_o(DisasContext *s, DisasOps *o)
5829 o->in2 = regs[get_field(s, r2)];
5830 o->g_in2 = true;
5832 #define SPEC_in2_r2_o 0
5834 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5836 int r2 = get_field(s, r2);
5837 if (r2 != 0) {
5838 o->in2 = load_reg(r2);
5841 #define SPEC_in2_r2_nz 0
5843 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5845 o->in2 = tcg_temp_new_i64();
5846 tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5848 #define SPEC_in2_r2_8s 0
5850 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5852 o->in2 = tcg_temp_new_i64();
5853 tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5855 #define SPEC_in2_r2_8u 0
5857 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5859 o->in2 = tcg_temp_new_i64();
5860 tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5862 #define SPEC_in2_r2_16s 0
5864 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5866 o->in2 = tcg_temp_new_i64();
5867 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5869 #define SPEC_in2_r2_16u 0
5871 static void in2_r3(DisasContext *s, DisasOps *o)
5873 o->in2 = load_reg(get_field(s, r3));
5875 #define SPEC_in2_r3 0
5877 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5879 o->in2 = tcg_temp_new_i64();
5880 tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5882 #define SPEC_in2_r3_sr32 0
5884 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5886 o->in2 = tcg_temp_new_i64();
5887 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5889 #define SPEC_in2_r3_32u 0
5891 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5893 o->in2 = tcg_temp_new_i64();
5894 tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5896 #define SPEC_in2_r2_32s 0
5898 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5900 o->in2 = tcg_temp_new_i64();
5901 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5903 #define SPEC_in2_r2_32u 0
5905 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5907 o->in2 = tcg_temp_new_i64();
5908 tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5910 #define SPEC_in2_r2_sr32 0
5912 static void in2_e2(DisasContext *s, DisasOps *o)
5914 o->in2 = load_freg32_i64(get_field(s, r2));
5916 #define SPEC_in2_e2 0
5918 static void in2_f2(DisasContext *s, DisasOps *o)
5920 o->in2 = load_freg(get_field(s, r2));
5922 #define SPEC_in2_f2 0
5924 /* Load the low double word of an extended (128-bit) format FP number */
5925 static void in2_x2l(DisasContext *s, DisasOps *o)
5927 o->in2 = load_freg(get_field(s, r2) + 2);
5929 #define SPEC_in2_x2l SPEC_r2_f128
5931 static void in2_ra2(DisasContext *s, DisasOps *o)
5933 o->in2 = get_address(s, 0, get_field(s, r2), 0);
5935 #define SPEC_in2_ra2 0
5937 static void in2_a2(DisasContext *s, DisasOps *o)
5939 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5940 o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5942 #define SPEC_in2_a2 0
5944 static void in2_ri2(DisasContext *s, DisasOps *o)
5946 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
5948 #define SPEC_in2_ri2 0
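/*
 * Illustrative sketch (unused): relative-immediate (RI/RIL) operands
 * are signed halfword offsets from the instruction address, hence the
 * "* 2" above.
 */
static inline uint64_t ri2_target_sketch(uint64_t pc, int32_t i2)
{
    return pc + (int64_t)i2 * 2;
}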
5950 static void in2_sh32(DisasContext *s, DisasOps *o)
5952 help_l2_shift(s, o, 31);
5954 #define SPEC_in2_sh32 0
5956 static void in2_sh64(DisasContext *s, DisasOps *o)
5958 help_l2_shift(s, o, 63);
5960 #define SPEC_in2_sh64 0
5962 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5964 in2_a2(s, o);
5965 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5967 #define SPEC_in2_m2_8u 0
5969 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5971 in2_a2(s, o);
5972 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5974 #define SPEC_in2_m2_16s 0
5976 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5978 in2_a2(s, o);
5979 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5981 #define SPEC_in2_m2_16u 0
5983 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5985 in2_a2(s, o);
5986 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5988 #define SPEC_in2_m2_32s 0
5990 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5992 in2_a2(s, o);
5993 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5995 #define SPEC_in2_m2_32u 0
5997 #ifndef CONFIG_USER_ONLY
5998 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
6000 in2_a2(s, o);
6001 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
6003 #define SPEC_in2_m2_32ua 0
6004 #endif
6006 static void in2_m2_64(DisasContext *s, DisasOps *o)
6008 in2_a2(s, o);
6009 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6011 #define SPEC_in2_m2_64 0
6013 static void in2_m2_64w(DisasContext *s, DisasOps *o)
6015 in2_a2(s, o);
6016 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6017 gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
6019 #define SPEC_in2_m2_64w 0
6021 #ifndef CONFIG_USER_ONLY
6022 static void in2_m2_64a(DisasContext *s, DisasOps *o)
6024 in2_a2(s, o);
6025 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
6027 #define SPEC_in2_m2_64a 0
6028 #endif
6030 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
6032 in2_ri2(s, o);
6033 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6035 #define SPEC_in2_mri2_16u 0
6037 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
6039 in2_ri2(s, o);
6040 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6042 #define SPEC_in2_mri2_32s 0
6044 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
6046 in2_ri2(s, o);
6047 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6049 #define SPEC_in2_mri2_32u 0
6051 static void in2_mri2_64(DisasContext *s, DisasOps *o)
6053 in2_ri2(s, o);
6054 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6056 #define SPEC_in2_mri2_64 0
6058 static void in2_i2(DisasContext *s, DisasOps *o)
6060 o->in2 = tcg_const_i64(get_field(s, i2));
6062 #define SPEC_in2_i2 0
6064 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6066 o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6068 #define SPEC_in2_i2_8u 0
6070 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6072 o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6074 #define SPEC_in2_i2_16u 0
6076 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6078 o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6080 #define SPEC_in2_i2_32u 0
6082 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6084 uint64_t i2 = (uint16_t)get_field(s, i2);
6085 o->in2 = tcg_const_i64(i2 << s->insn->data);
6087 #define SPEC_in2_i2_16u_shl 0
6089 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6091 uint64_t i2 = (uint32_t)get_field(s, i2);
6092 o->in2 = tcg_const_i64(i2 << s->insn->data);
6094 #define SPEC_in2_i2_32u_shl 0
6096 #ifndef CONFIG_USER_ONLY
6097 static void in2_insn(DisasContext *s, DisasOps *o)
6099 o->in2 = tcg_const_i64(s->fields.raw_insn);
6101 #define SPEC_in2_insn 0
6102 #endif
6104 /* ====================================================================== */
6106 /* Find opc within the table of insns. This is formulated as a switch
6107 statement so that (1) we get compile-time notice of cut-paste errors
6108 for duplicated opcodes, and (2) the compiler generates the binary
6109 search tree, rather than us having to post-process the table. */
6111 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6112 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6114 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6115 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6117 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6118 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6120 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6122 enum DisasInsnEnum {
6123 #include "insn-data.def"
6126 #undef E
6127 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6128 .opc = OPC, \
6129 .flags = FL, \
6130 .fmt = FMT_##FT, \
6131 .fac = FAC_##FC, \
6132 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6133 .name = #NM, \
6134 .help_in1 = in1_##I1, \
6135 .help_in2 = in2_##I2, \
6136 .help_prep = prep_##P, \
6137 .help_wout = wout_##W, \
6138 .help_cout = cout_##CC, \
6139 .help_op = op_##OP, \
6140 .data = D \
6143 /* Allow 0 to be used for NULL in the table below. */
6144 #define in1_0 NULL
6145 #define in2_0 NULL
6146 #define prep_0 NULL
6147 #define wout_0 NULL
6148 #define cout_0 NULL
6149 #define op_0 NULL
6151 #define SPEC_in1_0 0
6152 #define SPEC_in2_0 0
6153 #define SPEC_prep_0 0
6154 #define SPEC_wout_0 0
6156 /* Give smaller names to the various facilities. */
6157 #define FAC_Z S390_FEAT_ZARCH
6158 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6159 #define FAC_DFP S390_FEAT_DFP
6160 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6161 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6162 #define FAC_EE S390_FEAT_EXECUTE_EXT
6163 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6164 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6165 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6166 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6167 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6168 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6169 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6170 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6171 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6172 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6173 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6174 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6175 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6176 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6177 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6178 #define FAC_SFLE S390_FEAT_STFLE
6179 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6180 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6181 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6182 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6183 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6184 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6185 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6186 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6187 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6188 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6189 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6190 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6191 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6192 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6193 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6194 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6195 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6196 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6197 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6198 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6200 static const DisasInsn insn_info[] = {
6201 #include "insn-data.def"
6204 #undef E
6205 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6206 case OPC: return &insn_info[insn_ ## NM];
6208 static const DisasInsn *lookup_opc(uint16_t opc)
6210 switch (opc) {
6211 #include "insn-data.def"
6212 default:
6213 return NULL;
6217 #undef F
6218 #undef E
6219 #undef D
6220 #undef C
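/*
 * Illustrative miniature (hypothetical names, unused) of the X-macro
 * pattern above: one list of instructions expands once into an enum of
 * indices and once into a switch that the compiler turns into a binary
 * search over opcodes.
 */
#define EXAMPLE_INSNS(ENT) ENT(0x1a, AR) ENT(0x5a, A)

#define X(OPC, NM) example_insn_##NM,
enum ExampleInsnEnum { EXAMPLE_INSNS(X) };
#undef X

static inline int example_lookup_opc(int opc)
{
#define X(OPC, NM) case OPC: return example_insn_##NM;
    switch (opc) {
    EXAMPLE_INSNS(X)
    default:
        return -1;
    }
#undef X
}
#undef EXAMPLE_INSNS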
6222 /* Extract a field from the insn. The INSN should be left-aligned in
6223    the uint64_t so that we can more easily utilize the big-bit-endian
6224    definitions we extract from the Principles of Operation. */
6226 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6228 uint32_t r, m;
6230 if (f->size == 0) {
6231 return;
6234 /* Zero extract the field from the insn. */
6235 r = (insn << f->beg) >> (64 - f->size);
6237 /* Sign-extend, or un-swap the field as necessary. */
6238 switch (f->type) {
6239 case 0: /* unsigned */
6240 break;
6241 case 1: /* signed */
6242 assert(f->size <= 32);
6243 m = 1u << (f->size - 1);
6244 r = (r ^ m) - m;
6245 break;
6246 case 2: /* dl+dh split, signed 20 bit. */
6247 r = ((int8_t)r << 12) | (r >> 8);
6248 break;
6249 case 3: /* MSB stored in RXB */
6250 g_assert(f->size == 4);
6251 switch (f->beg) {
6252 case 8:
6253 r |= extract64(insn, 63 - 36, 1) << 4;
6254 break;
6255 case 12:
6256 r |= extract64(insn, 63 - 37, 1) << 4;
6257 break;
6258 case 16:
6259 r |= extract64(insn, 63 - 38, 1) << 4;
6260 break;
6261 case 32:
6262 r |= extract64(insn, 63 - 39, 1) << 4;
6263 break;
6264 default:
6265 g_assert_not_reached();
6267 break;
6268 default:
6269 abort();
6272     /* Validate the "compressed" encoding we selected above; i.e. check
6273        that we haven't made two different original fields overlap. */
6274 assert(((o->presentC >> f->indexC) & 1) == 0);
6275 o->presentC |= 1 << f->indexC;
6276 o->presentO |= 1 << f->indexO;
6278 o->c[f->indexC] = r;
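/*
 * Illustrative sketches (unused) of the two non-obvious cases above: the
 * xor/subtract idiom sign-extends an arbitrary-width field, and the
 * 20-bit long displacement is reassembled from its split DL (12 bits)
 * and DH (signed 8 bits) parts.
 */
static inline uint32_t sext_field_sketch(uint32_t r, int size)
{
    uint32_t m = 1u << (size - 1);
    return (r ^ m) - m;   /* e.g. size=8, r=0xff -> 0xffffffff */
}

static inline int32_t long_disp_sketch(uint32_t raw)
{
    /* raw arrives as DL in bits 19-8 and DH in bits 7-0 */
    return ((int8_t)raw << 12) | (raw >> 8);
}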
6281 /* Lookup the insn at the current PC, extracting the operands into O and
6282 returning the info struct for the insn. Returns NULL for invalid insn. */
6284 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6286 uint64_t insn, pc = s->base.pc_next;
6287 int op, op2, ilen;
6288 const DisasInsn *info;
6290 if (unlikely(s->ex_value)) {
6291 /* Drop the EX data now, so that it's clear on exception paths. */
6292 TCGv_i64 zero = tcg_const_i64(0);
6293 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6294 tcg_temp_free_i64(zero);
6296 /* Extract the values saved by EXECUTE. */
6297 insn = s->ex_value & 0xffffffffffff0000ull;
6298 ilen = s->ex_value & 0xf;
6299 op = insn >> 56;
6300 } else {
6301 insn = ld_code2(env, pc);
6302 op = (insn >> 8) & 0xff;
6303 ilen = get_ilen(op);
6304 switch (ilen) {
6305 case 2:
6306 insn = insn << 48;
6307 break;
6308 case 4:
6309 insn = ld_code4(env, pc) << 32;
6310 break;
6311 case 6:
6312 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6313 break;
6314 default:
6315 g_assert_not_reached();
6318 s->pc_tmp = s->base.pc_next + ilen;
6319 s->ilen = ilen;
6321     /* We can't actually determine the insn format until we've looked up
6322        the full insn opcode, which we can't do without locating the
6323        secondary opcode.  Assume by default that OP2 is at bit 40; for
6324        those smaller insns that don't actually have a secondary opcode
6325        this will correctly result in OP2 = 0. */
6326 switch (op) {
6327 case 0x01: /* E */
6328 case 0x80: /* S */
6329 case 0x82: /* S */
6330 case 0x93: /* S */
6331 case 0xb2: /* S, RRF, RRE, IE */
6332 case 0xb3: /* RRE, RRD, RRF */
6333 case 0xb9: /* RRE, RRF */
6334 case 0xe5: /* SSE, SIL */
6335 op2 = (insn << 8) >> 56;
6336 break;
6337 case 0xa5: /* RI */
6338 case 0xa7: /* RI */
6339 case 0xc0: /* RIL */
6340 case 0xc2: /* RIL */
6341 case 0xc4: /* RIL */
6342 case 0xc6: /* RIL */
6343 case 0xc8: /* SSF */
6344 case 0xcc: /* RIL */
6345 op2 = (insn << 12) >> 60;
6346 break;
6347 case 0xc5: /* MII */
6348 case 0xc7: /* SMI */
6349 case 0xd0 ... 0xdf: /* SS */
6350 case 0xe1: /* SS */
6351 case 0xe2: /* SS */
6352 case 0xe8: /* SS */
6353 case 0xe9: /* SS */
6354 case 0xea: /* SS */
6355 case 0xee ... 0xf3: /* SS */
6356 case 0xf8 ... 0xfd: /* SS */
6357 op2 = 0;
6358 break;
6359 default:
6360 op2 = (insn << 40) >> 56;
6361 break;
6364 memset(&s->fields, 0, sizeof(s->fields));
6365 s->fields.raw_insn = insn;
6366 s->fields.op = op;
6367 s->fields.op2 = op2;
6369 /* Lookup the instruction. */
6370 info = lookup_opc(op << 8 | op2);
6371 s->insn = info;
6373 /* If we found it, extract the operands. */
6374 if (info != NULL) {
6375 DisasFormat fmt = info->fmt;
6376 int i;
6378 for (i = 0; i < NUM_C_FIELD; ++i) {
6379 extract_field(&s->fields, &format_info[fmt].op[i], insn);
6382 return info;
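/*
 * Illustrative sketch (unused): the two leftmost bits of an s390x
 * opcode encode the instruction length (00 -> 2 bytes, 01/10 -> 4,
 * 11 -> 6), which is the property the ilen handling above relies on.
 */
static inline int ilen_sketch(uint8_t op)
{
    switch (op >> 6) {
    case 0:
        return 2;
    case 1:
    case 2:
        return 4;
    default:
        return 6;
    }
}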
6385 static bool is_afp_reg(int reg)
6387 return reg % 2 || reg > 6;
6390 static bool is_fp_pair(int reg)
6392 /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
6393 return !(reg & 0x2);
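/*
 * Illustrative sketch (unused): the bit test above is equivalent to this
 * enumeration -- a 128-bit FP value lives in the register pair (n, n+2),
 * and only these values of n are valid high halves.
 */
static inline bool is_fp_pair_sketch(int reg)
{
    switch (reg) {
    case 0: case 1: case 4: case 5:
    case 8: case 9: case 12: case 13:
        return true;
    default:
        return false;
    }
}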
6396 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6398 const DisasInsn *insn;
6399 DisasJumpType ret = DISAS_NEXT;
6400 DisasOps o = {};
6401 bool icount = false;
6403 /* Search for the insn in the table. */
6404 insn = extract_insn(env, s);
6406 /* Emit insn_start now that we know the ILEN. */
6407 tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);
6409 /* Not found means unimplemented/illegal opcode. */
6410 if (insn == NULL) {
6411 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6412 s->fields.op, s->fields.op2);
6413 gen_illegal_opcode(s);
6414 return DISAS_NORETURN;
6417 #ifndef CONFIG_USER_ONLY
6418 if (s->base.tb->flags & FLAG_MASK_PER) {
6419 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6420 gen_helper_per_ifetch(cpu_env, addr);
6421 tcg_temp_free_i64(addr);
6423 #endif
6425 /* process flags */
6426 if (insn->flags) {
6427 /* privileged instruction */
6428 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6429 gen_program_exception(s, PGM_PRIVILEGED);
6430 return DISAS_NORETURN;
6433 /* if AFP is not enabled, instructions and registers are forbidden */
6434 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6435 uint8_t dxc = 0;
6437 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6438 dxc = 1;
6440 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6441 dxc = 1;
6443 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6444 dxc = 1;
6446 if (insn->flags & IF_BFP) {
6447 dxc = 2;
6449 if (insn->flags & IF_DFP) {
6450 dxc = 3;
6452 if (insn->flags & IF_VEC) {
6453 dxc = 0xfe;
6455 if (dxc) {
6456 gen_data_exception(dxc);
6457 return DISAS_NORETURN;
6461     /* if vector instructions are not enabled, executing them is forbidden */
6462 if (insn->flags & IF_VEC) {
6463 if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6464 gen_data_exception(0xfe);
6465 return DISAS_NORETURN;
6469 /* input/output is the special case for icount mode */
6470 if (unlikely(insn->flags & IF_IO)) {
6471 icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6472 if (icount) {
6473 gen_io_start();
6478 /* Check for insn specification exceptions. */
6479 if (insn->spec) {
6480 if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6481 (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6482 (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6483 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6484 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6485 gen_program_exception(s, PGM_SPECIFICATION);
6486 return DISAS_NORETURN;
6490 /* Implement the instruction. */
6491 if (insn->help_in1) {
6492 insn->help_in1(s, &o);
6494 if (insn->help_in2) {
6495 insn->help_in2(s, &o);
6497 if (insn->help_prep) {
6498 insn->help_prep(s, &o);
6500 if (insn->help_op) {
6501 ret = insn->help_op(s, &o);
6503 if (ret != DISAS_NORETURN) {
6504 if (insn->help_wout) {
6505 insn->help_wout(s, &o);
6507 if (insn->help_cout) {
6508 insn->help_cout(s, &o);
6512 /* Free any temporaries created by the helpers. */
6513 if (o.out && !o.g_out) {
6514 tcg_temp_free_i64(o.out);
6516 if (o.out2 && !o.g_out2) {
6517 tcg_temp_free_i64(o.out2);
6519 if (o.in1 && !o.g_in1) {
6520 tcg_temp_free_i64(o.in1);
6522 if (o.in2 && !o.g_in2) {
6523 tcg_temp_free_i64(o.in2);
6525 if (o.addr1) {
6526 tcg_temp_free_i64(o.addr1);
6529 /* io should be the last instruction in tb when icount is enabled */
6530 if (unlikely(icount && ret == DISAS_NEXT)) {
6531 ret = DISAS_PC_STALE;

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode: the PSW address is only 31 bits wide */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
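    /* For EXECUTE, the target instruction is passed in via tb->cs_base.  */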
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Emit an insn_start to accompany the breakpoint exception.
     * The ILEN value is a dummy, since this does not result in
     * an s390x exception, but an internal qemu exception which
     * brings us back to interact with the gdbstub.
     */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
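    /*
     * If translation continues, end the TB once it has moved past the
     * first page, or when translating an EXECUTE target (ex_value set),
     * which is a single instruction.
     */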
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
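    /*
     * GOTO_TB and NORETURN need nothing more: the TB already ends in a
     * direct jump or an exception.
     */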
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}
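
/* Hooks consumed by the generic translator loop (see exec/translator.h).  */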
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start = s390x_tr_tb_start,
    .insn_start = s390x_tr_insn_start,
    .breakpoint_check = s390x_tr_breakpoint_check,
    .translate_insn = s390x_tr_translate_insn,
    .tb_stop = s390x_tr_tb_stop,
    .disas_log = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}
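
/*
 * The data[] entries mirror the arguments of tcg_gen_insn_start() above:
 * data[0] = psw address, data[1] = cc_op, data[2] = ilen.
 */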
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}