target/s390x/translate.c
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
46 #include "exec/translator.h"
47 #include "exec/log.h"
48 #include "qemu/atomic128.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
57 * Define a structure to hold the decoded fields. We'll store each inside
58 * an array indexed by an enum. In order to conserve memory, we'll arrange
59 * for fields that do not exist at the same time to overlap, thus the "C"
60 * for compact. For checking purposes there is also an "O" for the
61 * original index, which is applied to the availability bitmaps.
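/* For example, no instruction format carries both r1 and m1, so they
   share compact slot 0 (FLD_C_r1 == FLD_C_m1 == 0) while remaining
   distinguishable via their original indices in the presentO bitmap. */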
64 enum DisasFieldIndexO {
65 FLD_O_r1,
66 FLD_O_r2,
67 FLD_O_r3,
68 FLD_O_m1,
69 FLD_O_m3,
70 FLD_O_m4,
71 FLD_O_m5,
72 FLD_O_m6,
73 FLD_O_b1,
74 FLD_O_b2,
75 FLD_O_b4,
76 FLD_O_d1,
77 FLD_O_d2,
78 FLD_O_d4,
79 FLD_O_x2,
80 FLD_O_l1,
81 FLD_O_l2,
82 FLD_O_i1,
83 FLD_O_i2,
84 FLD_O_i3,
85 FLD_O_i4,
86 FLD_O_i5,
87 FLD_O_v1,
88 FLD_O_v2,
89 FLD_O_v3,
90 FLD_O_v4,
93 enum DisasFieldIndexC {
94 FLD_C_r1 = 0,
95 FLD_C_m1 = 0,
96 FLD_C_b1 = 0,
97 FLD_C_i1 = 0,
98 FLD_C_v1 = 0,
100 FLD_C_r2 = 1,
101 FLD_C_b2 = 1,
102 FLD_C_i2 = 1,
104 FLD_C_r3 = 2,
105 FLD_C_m3 = 2,
106 FLD_C_i3 = 2,
107 FLD_C_v3 = 2,
109 FLD_C_m4 = 3,
110 FLD_C_b4 = 3,
111 FLD_C_i4 = 3,
112 FLD_C_l1 = 3,
113 FLD_C_v4 = 3,
115 FLD_C_i5 = 4,
116 FLD_C_d1 = 4,
117 FLD_C_m5 = 4,
119 FLD_C_d2 = 5,
120 FLD_C_m6 = 5,
122 FLD_C_d4 = 6,
123 FLD_C_x2 = 6,
124 FLD_C_l2 = 6,
125 FLD_C_v2 = 6,
127 NUM_C_FIELD = 7
130 struct DisasFields {
131 uint64_t raw_insn;
132 unsigned op:8;
133 unsigned op2:8;
134 unsigned presentC:16;
135 unsigned int presentO;
136 int c[NUM_C_FIELD];
139 struct DisasContext {
140 DisasContextBase base;
141 const DisasInsn *insn;
142 DisasFields fields;
143 uint64_t ex_value;
145 * During translate_one(), pc_tmp is used to determine the instruction
146 * to be executed after base.pc_next - e.g. next sequential instruction
147 * or a branch target.
149 uint64_t pc_tmp;
150 uint32_t ilen;
151 enum cc_op cc_op;
152 bool do_debug;
155 /* Information carried about a condition to be evaluated. */
156 typedef struct {
157 TCGCond cond:8;
158 bool is_64;
159 bool g1;
160 bool g2;
161 union {
162 struct { TCGv_i64 a, b; } s64;
163 struct { TCGv_i32 a, b; } s32;
164 } u;
165 } DisasCompare;
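/* g1/g2 mark operands that alias global TCG values (e.g. cc_src, cc_op);
   free_compare() must not free those. */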
167 #ifdef DEBUG_INLINE_BRANCHES
168 static uint64_t inline_branch_hit[CC_OP_MAX];
169 static uint64_t inline_branch_miss[CC_OP_MAX];
170 #endif
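/* Build the link information for pc: the full address in 64-bit mode;
   otherwise the 24-bit or 31-bit address (the latter with its high bit
   set) deposited into the low 32 bits of the output register. */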
172 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
174 TCGv_i64 tmp;
176 if (s->base.tb->flags & FLAG_MASK_32) {
177 if (s->base.tb->flags & FLAG_MASK_64) {
178 tcg_gen_movi_i64(out, pc);
179 return;
181 pc |= 0x80000000;
183 assert(!(s->base.tb->flags & FLAG_MASK_64));
184 tmp = tcg_const_i64(pc);
185 tcg_gen_deposit_i64(out, out, tmp, 0, 32);
186 tcg_temp_free_i64(tmp);
189 static TCGv_i64 psw_addr;
190 static TCGv_i64 psw_mask;
191 static TCGv_i64 gbea;
193 static TCGv_i32 cc_op;
194 static TCGv_i64 cc_src;
195 static TCGv_i64 cc_dst;
196 static TCGv_i64 cc_vr;
198 static char cpu_reg_names[16][4];
199 static TCGv_i64 regs[16];
201 void s390x_translate_init(void)
203 int i;
205 psw_addr = tcg_global_mem_new_i64(cpu_env,
206 offsetof(CPUS390XState, psw.addr),
207 "psw_addr");
208 psw_mask = tcg_global_mem_new_i64(cpu_env,
209 offsetof(CPUS390XState, psw.mask),
210 "psw_mask");
211 gbea = tcg_global_mem_new_i64(cpu_env,
212 offsetof(CPUS390XState, gbea),
213 "gbea");
215 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
216 "cc_op");
217 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
218 "cc_src");
219 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
220 "cc_dst");
221 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
222 "cc_vr");
224 for (i = 0; i < 16; i++) {
225 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
226 regs[i] = tcg_global_mem_new(cpu_env,
227 offsetof(CPUS390XState, regs[i]),
228 cpu_reg_names[i]);
232 static inline int vec_full_reg_offset(uint8_t reg)
234 g_assert(reg < 32);
235 return offsetof(CPUS390XState, vregs[reg][0]);
238 static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
240 /* Convert element size (es) - e.g. MO_8 - to bytes */
241 const uint8_t bytes = 1 << es;
242 int offs = enr * bytes;
245 * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
246 * of the 16 byte vector, on both little and big endian systems.
248 * Big Endian (target/possible host)
249 * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
250 * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
251 * W: [ 0][ 1] - [ 2][ 3]
252 * DW: [ 0] - [ 1]
254 * Little Endian (possible host)
255 * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
256 * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
257 * W: [ 1][ 0] - [ 3][ 2]
258 * DW: [ 0] - [ 1]
260 * For 16 byte elements, the two 8 byte halves will not form a host
261 * int128 if the host is little endian, since they're in the wrong order.
262 * Some operations (e.g. xor) do not care. For operations like addition,
263 * the two 8 byte elements have to be loaded separately. Let's force all
264 * 16 byte operations to handle it in a special way.
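/* Example: on a little endian host, element 0 of a byte vector (es = MO_8,
   enr = 0) yields offs = 0 ^ 7 = 7, i.e. the most significant byte of
   vregs[reg][0], matching big endian element ordering. */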
266 g_assert(es <= MO_64);
267 #ifndef HOST_WORDS_BIGENDIAN
268 offs ^= (8 - bytes);
269 #endif
270 return offs + vec_full_reg_offset(reg);
273 static inline int freg64_offset(uint8_t reg)
275 g_assert(reg < 16);
276 return vec_reg_offset(reg, 0, MO_64);
279 static inline int freg32_offset(uint8_t reg)
281 g_assert(reg < 16);
282 return vec_reg_offset(reg, 0, MO_32);
285 static TCGv_i64 load_reg(int reg)
287 TCGv_i64 r = tcg_temp_new_i64();
288 tcg_gen_mov_i64(r, regs[reg]);
289 return r;
292 static TCGv_i64 load_freg(int reg)
294 TCGv_i64 r = tcg_temp_new_i64();
296 tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
297 return r;
300 static TCGv_i64 load_freg32_i64(int reg)
302 TCGv_i64 r = tcg_temp_new_i64();
304 tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
305 return r;
308 static void store_reg(int reg, TCGv_i64 v)
310 tcg_gen_mov_i64(regs[reg], v);
313 static void store_freg(int reg, TCGv_i64 v)
315 tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
318 static void store_reg32_i64(int reg, TCGv_i64 v)
320 /* 32 bit register writes keep the upper half */
321 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
324 static void store_reg32h_i64(int reg, TCGv_i64 v)
326 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
329 static void store_freg32_i64(int reg, TCGv_i64 v)
331 tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
334 static void return_low128(TCGv_i64 dest)
336 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
339 static void update_psw_addr(DisasContext *s)
341 /* psw.addr */
342 tcg_gen_movi_i64(psw_addr, s->base.pc_next);
345 static void per_branch(DisasContext *s, bool to_next)
347 #ifndef CONFIG_USER_ONLY
348 tcg_gen_movi_i64(gbea, s->base.pc_next);
350 if (s->base.tb->flags & FLAG_MASK_PER) {
351 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
352 gen_helper_per_branch(cpu_env, gbea, next_pc);
353 if (to_next) {
354 tcg_temp_free_i64(next_pc);
357 #endif
360 static void per_branch_cond(DisasContext *s, TCGCond cond,
361 TCGv_i64 arg1, TCGv_i64 arg2)
363 #ifndef CONFIG_USER_ONLY
364 if (s->base.tb->flags & FLAG_MASK_PER) {
365 TCGLabel *lab = gen_new_label();
366 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
368 tcg_gen_movi_i64(gbea, s->base.pc_next);
369 gen_helper_per_branch(cpu_env, gbea, psw_addr);
371 gen_set_label(lab);
372 } else {
373 TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
374 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
375 tcg_temp_free_i64(pc);
377 #endif
380 static void per_breaking_event(DisasContext *s)
382 tcg_gen_movi_i64(gbea, s->base.pc_next);
385 static void update_cc_op(DisasContext *s)
387 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
388 tcg_gen_movi_i32(cc_op, s->cc_op);
392 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
394 return (uint64_t)cpu_lduw_code(env, pc);
397 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
399 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
402 static int get_mem_index(DisasContext *s)
404 #ifdef CONFIG_USER_ONLY
405 return MMU_USER_IDX;
406 #else
407 if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
408 return MMU_REAL_IDX;
411 switch (s->base.tb->flags & FLAG_MASK_ASC) {
412 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
413 return MMU_PRIMARY_IDX;
414 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
415 return MMU_SECONDARY_IDX;
416 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
417 return MMU_HOME_IDX;
418 default:
419 tcg_abort();
420 break;
422 #endif
425 static void gen_exception(int excp)
427 TCGv_i32 tmp = tcg_const_i32(excp);
428 gen_helper_exception(cpu_env, tmp);
429 tcg_temp_free_i32(tmp);
432 static void gen_program_exception(DisasContext *s, int code)
434 TCGv_i32 tmp;
436 /* Remember what pgm exception this was. */
437 tmp = tcg_const_i32(code);
438 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
439 tcg_temp_free_i32(tmp);
441 tmp = tcg_const_i32(s->ilen);
442 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
443 tcg_temp_free_i32(tmp);
445 /* update the psw */
446 update_psw_addr(s);
448 /* Save off cc. */
449 update_cc_op(s);
451 /* Trigger exception. */
452 gen_exception(EXCP_PGM);
455 static inline void gen_illegal_opcode(DisasContext *s)
457 gen_program_exception(s, PGM_OPERATION);
460 static inline void gen_data_exception(uint8_t dxc)
462 TCGv_i32 tmp = tcg_const_i32(dxc);
463 gen_helper_data_exception(cpu_env, tmp);
464 tcg_temp_free_i32(tmp);
467 static inline void gen_trap(DisasContext *s)
469 /* Set DXC to 0xff */
470 gen_data_exception(0xff);
473 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
474 int64_t imm)
476 tcg_gen_addi_i64(dst, src, imm);
477 if (!(s->base.tb->flags & FLAG_MASK_64)) {
478 if (s->base.tb->flags & FLAG_MASK_32) {
479 tcg_gen_andi_i64(dst, dst, 0x7fffffff);
480 } else {
481 tcg_gen_andi_i64(dst, dst, 0x00ffffff);
486 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
488 TCGv_i64 tmp = tcg_temp_new_i64();
491 * Note that d2 is limited to 20 bits, signed. If we crop negative
492 * displacements early we create larger immediate addends.
494 if (b2 && x2) {
495 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
496 gen_addi_and_wrap_i64(s, tmp, tmp, d2);
497 } else if (b2) {
498 gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
499 } else if (x2) {
500 gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
501 } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
502 if (s->base.tb->flags & FLAG_MASK_32) {
503 tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
504 } else {
505 tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
507 } else {
508 tcg_gen_movi_i64(tmp, d2);
511 return tmp;
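/* The constant ops CC_OP_CONST0..CC_OP_CONST3 occupy enum values 0..3 and
   carry no data in cc_src/cc_dst/cc_vr, hence the "> 3" test below. */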
514 static inline bool live_cc_data(DisasContext *s)
516 return (s->cc_op != CC_OP_DYNAMIC
517 && s->cc_op != CC_OP_STATIC
518 && s->cc_op > 3);
521 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
523 if (live_cc_data(s)) {
524 tcg_gen_discard_i64(cc_src);
525 tcg_gen_discard_i64(cc_dst);
526 tcg_gen_discard_i64(cc_vr);
528 s->cc_op = CC_OP_CONST0 + val;
531 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
533 if (live_cc_data(s)) {
534 tcg_gen_discard_i64(cc_src);
535 tcg_gen_discard_i64(cc_vr);
537 tcg_gen_mov_i64(cc_dst, dst);
538 s->cc_op = op;
541 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
542 TCGv_i64 dst)
544 if (live_cc_data(s)) {
545 tcg_gen_discard_i64(cc_vr);
547 tcg_gen_mov_i64(cc_src, src);
548 tcg_gen_mov_i64(cc_dst, dst);
549 s->cc_op = op;
552 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
553 TCGv_i64 dst, TCGv_i64 vr)
555 tcg_gen_mov_i64(cc_src, src);
556 tcg_gen_mov_i64(cc_dst, dst);
557 tcg_gen_mov_i64(cc_vr, vr);
558 s->cc_op = op;
561 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
563 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
566 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
568 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
571 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
573 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
576 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
578 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
581 /* CC value is in env->cc_op */
582 static void set_cc_static(DisasContext *s)
584 if (live_cc_data(s)) {
585 tcg_gen_discard_i64(cc_src);
586 tcg_gen_discard_i64(cc_dst);
587 tcg_gen_discard_i64(cc_vr);
589 s->cc_op = CC_OP_STATIC;
592 /* calculates cc into cc_op */
593 static void gen_op_calc_cc(DisasContext *s)
595 TCGv_i32 local_cc_op = NULL;
596 TCGv_i64 dummy = NULL;
598 switch (s->cc_op) {
599 default:
600 dummy = tcg_const_i64(0);
601 /* FALLTHRU */
602 case CC_OP_ADD_64:
603 case CC_OP_ADDU_64:
604 case CC_OP_ADDC_64:
605 case CC_OP_SUB_64:
606 case CC_OP_SUBU_64:
607 case CC_OP_SUBB_64:
608 case CC_OP_ADD_32:
609 case CC_OP_ADDU_32:
610 case CC_OP_ADDC_32:
611 case CC_OP_SUB_32:
612 case CC_OP_SUBU_32:
613 case CC_OP_SUBB_32:
614 local_cc_op = tcg_const_i32(s->cc_op);
615 break;
616 case CC_OP_CONST0:
617 case CC_OP_CONST1:
618 case CC_OP_CONST2:
619 case CC_OP_CONST3:
620 case CC_OP_STATIC:
621 case CC_OP_DYNAMIC:
622 break;
625 switch (s->cc_op) {
626 case CC_OP_CONST0:
627 case CC_OP_CONST1:
628 case CC_OP_CONST2:
629 case CC_OP_CONST3:
630 /* s->cc_op is the cc value */
631 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
632 break;
633 case CC_OP_STATIC:
634 /* env->cc_op already is the cc value */
635 break;
636 case CC_OP_NZ:
637 case CC_OP_ABS_64:
638 case CC_OP_NABS_64:
639 case CC_OP_ABS_32:
640 case CC_OP_NABS_32:
641 case CC_OP_LTGT0_32:
642 case CC_OP_LTGT0_64:
643 case CC_OP_COMP_32:
644 case CC_OP_COMP_64:
645 case CC_OP_NZ_F32:
646 case CC_OP_NZ_F64:
647 case CC_OP_FLOGR:
648 case CC_OP_LCBB:
649 case CC_OP_MULS_32:
650 /* 1 argument */
651 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
652 break;
653 case CC_OP_ICM:
654 case CC_OP_LTGT_32:
655 case CC_OP_LTGT_64:
656 case CC_OP_LTUGTU_32:
657 case CC_OP_LTUGTU_64:
658 case CC_OP_TM_32:
659 case CC_OP_TM_64:
660 case CC_OP_SLA_32:
661 case CC_OP_SLA_64:
662 case CC_OP_NZ_F128:
663 case CC_OP_VC:
664 case CC_OP_MULS_64:
665 /* 2 arguments */
666 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
667 break;
668 case CC_OP_ADD_64:
669 case CC_OP_ADDU_64:
670 case CC_OP_ADDC_64:
671 case CC_OP_SUB_64:
672 case CC_OP_SUBU_64:
673 case CC_OP_SUBB_64:
674 case CC_OP_ADD_32:
675 case CC_OP_ADDU_32:
676 case CC_OP_ADDC_32:
677 case CC_OP_SUB_32:
678 case CC_OP_SUBU_32:
679 case CC_OP_SUBB_32:
680 /* 3 arguments */
681 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
682 break;
683 case CC_OP_DYNAMIC:
684 /* unknown operation - assume 3 arguments and cc_op in env */
685 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
686 break;
687 default:
688 tcg_abort();
691 if (local_cc_op) {
692 tcg_temp_free_i32(local_cc_op);
694 if (dummy) {
695 tcg_temp_free_i64(dummy);
698 /* We now have cc in cc_op as constant */
699 set_cc_static(s);
702 static bool use_exit_tb(DisasContext *s)
704 return s->base.singlestep_enabled ||
705 (tb_cflags(s->base.tb) & CF_LAST_IO) ||
706 (s->base.tb->flags & FLAG_MASK_PER);
709 static bool use_goto_tb(DisasContext *s, uint64_t dest)
711 if (unlikely(use_exit_tb(s))) {
712 return false;
714 #ifndef CONFIG_USER_ONLY
715 return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
716 (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
717 #else
718 return true;
719 #endif
722 static void account_noninline_branch(DisasContext *s, int cc_op)
724 #ifdef DEBUG_INLINE_BRANCHES
725 inline_branch_miss[cc_op]++;
726 #endif
729 static void account_inline_branch(DisasContext *s, int cc_op)
731 #ifdef DEBUG_INLINE_BRANCHES
732 inline_branch_hit[cc_op]++;
733 #endif
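/* The 4-bit branch mask selects which CC values are taken: bit 8 matches
   CC 0, bit 4 CC 1, bit 2 CC 2, and bit 1 CC 3. The tables below are
   indexed by that mask. */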
736 /* Table of mask values to comparison codes, given a comparison as input.
737 For such an input, CC=3 should not be possible. */
738 static const TCGCond ltgt_cond[16] = {
739 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
740 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
741 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
742 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
743 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
744 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
745 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
746 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
749 /* Table of mask values to comparison codes, given a logic op as input.
750 For such an input, only CC=0 and CC=1 should be possible. */
751 static const TCGCond nz_cond[16] = {
752 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
753 TCG_COND_NEVER, TCG_COND_NEVER,
754 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
755 TCG_COND_NE, TCG_COND_NE,
756 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
757 TCG_COND_EQ, TCG_COND_EQ,
758 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
759 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
762 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
763 details required to generate a TCG comparison. */
764 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
766 TCGCond cond;
767 enum cc_op old_cc_op = s->cc_op;
769 if (mask == 15 || mask == 0) {
770 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
771 c->u.s32.a = cc_op;
772 c->u.s32.b = cc_op;
773 c->g1 = c->g2 = true;
774 c->is_64 = false;
775 return;
778 /* Find the TCG condition for the mask + cc op. */
779 switch (old_cc_op) {
780 case CC_OP_LTGT0_32:
781 case CC_OP_LTGT0_64:
782 case CC_OP_LTGT_32:
783 case CC_OP_LTGT_64:
784 cond = ltgt_cond[mask];
785 if (cond == TCG_COND_NEVER) {
786 goto do_dynamic;
788 account_inline_branch(s, old_cc_op);
789 break;
791 case CC_OP_LTUGTU_32:
792 case CC_OP_LTUGTU_64:
793 cond = tcg_unsigned_cond(ltgt_cond[mask]);
794 if (cond == TCG_COND_NEVER) {
795 goto do_dynamic;
797 account_inline_branch(s, old_cc_op);
798 break;
800 case CC_OP_NZ:
801 cond = nz_cond[mask];
802 if (cond == TCG_COND_NEVER) {
803 goto do_dynamic;
805 account_inline_branch(s, old_cc_op);
806 break;
808 case CC_OP_TM_32:
809 case CC_OP_TM_64:
810 switch (mask) {
811 case 8:
812 cond = TCG_COND_EQ;
813 break;
814 case 4 | 2 | 1:
815 cond = TCG_COND_NE;
816 break;
817 default:
818 goto do_dynamic;
820 account_inline_branch(s, old_cc_op);
821 break;
823 case CC_OP_ICM:
824 switch (mask) {
825 case 8:
826 cond = TCG_COND_EQ;
827 break;
828 case 4 | 2 | 1:
829 case 4 | 2:
830 cond = TCG_COND_NE;
831 break;
832 default:
833 goto do_dynamic;
835 account_inline_branch(s, old_cc_op);
836 break;
838 case CC_OP_FLOGR:
839 switch (mask & 0xa) {
840 case 8: /* src == 0 -> no one bit found */
841 cond = TCG_COND_EQ;
842 break;
843 case 2: /* src != 0 -> one bit found */
844 cond = TCG_COND_NE;
845 break;
846 default:
847 goto do_dynamic;
849 account_inline_branch(s, old_cc_op);
850 break;
852 case CC_OP_ADDU_32:
853 case CC_OP_ADDU_64:
854 switch (mask) {
855 case 8 | 2: /* vr == 0 */
856 cond = TCG_COND_EQ;
857 break;
858 case 4 | 1: /* vr != 0 */
859 cond = TCG_COND_NE;
860 break;
861 case 8 | 4: /* no carry -> vr >= src */
862 cond = TCG_COND_GEU;
863 break;
864 case 2 | 1: /* carry -> vr < src */
865 cond = TCG_COND_LTU;
866 break;
867 default:
868 goto do_dynamic;
870 account_inline_branch(s, old_cc_op);
871 break;
873 case CC_OP_SUBU_32:
874 case CC_OP_SUBU_64:
875 /* Note that CC=0 is impossible; treat it as don't-care. */
876 switch (mask & 7) {
877 case 2: /* zero -> op1 == op2 */
878 cond = TCG_COND_EQ;
879 break;
880 case 4 | 1: /* !zero -> op1 != op2 */
881 cond = TCG_COND_NE;
882 break;
883 case 4: /* borrow (!carry) -> op1 < op2 */
884 cond = TCG_COND_LTU;
885 break;
886 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
887 cond = TCG_COND_GEU;
888 break;
889 default:
890 goto do_dynamic;
892 account_inline_branch(s, old_cc_op);
893 break;
895 default:
896 do_dynamic:
897 /* Calculate cc value. */
898 gen_op_calc_cc(s);
899 /* FALLTHRU */
901 case CC_OP_STATIC:
902 /* Jump based on CC. We'll load up the real cond below;
903 the assignment here merely avoids a compiler warning. */
904 account_noninline_branch(s, old_cc_op);
905 old_cc_op = CC_OP_STATIC;
906 cond = TCG_COND_NEVER;
907 break;
910 /* Load up the arguments of the comparison. */
911 c->is_64 = true;
912 c->g1 = c->g2 = false;
913 switch (old_cc_op) {
914 case CC_OP_LTGT0_32:
915 c->is_64 = false;
916 c->u.s32.a = tcg_temp_new_i32();
917 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
918 c->u.s32.b = tcg_const_i32(0);
919 break;
920 case CC_OP_LTGT_32:
921 case CC_OP_LTUGTU_32:
922 case CC_OP_SUBU_32:
923 c->is_64 = false;
924 c->u.s32.a = tcg_temp_new_i32();
925 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
926 c->u.s32.b = tcg_temp_new_i32();
927 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
928 break;
930 case CC_OP_LTGT0_64:
931 case CC_OP_NZ:
932 case CC_OP_FLOGR:
933 c->u.s64.a = cc_dst;
934 c->u.s64.b = tcg_const_i64(0);
935 c->g1 = true;
936 break;
937 case CC_OP_LTGT_64:
938 case CC_OP_LTUGTU_64:
939 case CC_OP_SUBU_64:
940 c->u.s64.a = cc_src;
941 c->u.s64.b = cc_dst;
942 c->g1 = c->g2 = true;
943 break;
945 case CC_OP_TM_32:
946 case CC_OP_TM_64:
947 case CC_OP_ICM:
948 c->u.s64.a = tcg_temp_new_i64();
949 c->u.s64.b = tcg_const_i64(0);
950 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
951 break;
953 case CC_OP_ADDU_32:
954 c->is_64 = false;
955 c->u.s32.a = tcg_temp_new_i32();
956 c->u.s32.b = tcg_temp_new_i32();
957 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
958 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
959 tcg_gen_movi_i32(c->u.s32.b, 0);
960 } else {
961 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
963 break;
965 case CC_OP_ADDU_64:
966 c->u.s64.a = cc_vr;
967 c->g1 = true;
968 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
969 c->u.s64.b = tcg_const_i64(0);
970 } else {
971 c->u.s64.b = cc_src;
972 c->g2 = true;
974 break;
976 case CC_OP_STATIC:
977 c->is_64 = false;
978 c->u.s32.a = cc_op;
979 c->g1 = true;
980 switch (mask) {
981 case 0x8 | 0x4 | 0x2: /* cc != 3 */
982 cond = TCG_COND_NE;
983 c->u.s32.b = tcg_const_i32(3);
984 break;
985 case 0x8 | 0x4 | 0x1: /* cc != 2 */
986 cond = TCG_COND_NE;
987 c->u.s32.b = tcg_const_i32(2);
988 break;
989 case 0x8 | 0x2 | 0x1: /* cc != 1 */
990 cond = TCG_COND_NE;
991 c->u.s32.b = tcg_const_i32(1);
992 break;
993 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
994 cond = TCG_COND_EQ;
995 c->g1 = false;
996 c->u.s32.a = tcg_temp_new_i32();
997 c->u.s32.b = tcg_const_i32(0);
998 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
999 break;
1000 case 0x8 | 0x4: /* cc < 2 */
1001 cond = TCG_COND_LTU;
1002 c->u.s32.b = tcg_const_i32(2);
1003 break;
1004 case 0x8: /* cc == 0 */
1005 cond = TCG_COND_EQ;
1006 c->u.s32.b = tcg_const_i32(0);
1007 break;
1008 case 0x4 | 0x2 | 0x1: /* cc != 0 */
1009 cond = TCG_COND_NE;
1010 c->u.s32.b = tcg_const_i32(0);
1011 break;
1012 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
1013 cond = TCG_COND_NE;
1014 c->g1 = false;
1015 c->u.s32.a = tcg_temp_new_i32();
1016 c->u.s32.b = tcg_const_i32(0);
1017 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
1018 break;
1019 case 0x4: /* cc == 1 */
1020 cond = TCG_COND_EQ;
1021 c->u.s32.b = tcg_const_i32(1);
1022 break;
1023 case 0x2 | 0x1: /* cc > 1 */
1024 cond = TCG_COND_GTU;
1025 c->u.s32.b = tcg_const_i32(1);
1026 break;
1027 case 0x2: /* cc == 2 */
1028 cond = TCG_COND_EQ;
1029 c->u.s32.b = tcg_const_i32(2);
1030 break;
1031 case 0x1: /* cc == 3 */
1032 cond = TCG_COND_EQ;
1033 c->u.s32.b = tcg_const_i32(3);
1034 break;
1035 default:
1036 /* CC is masked by something else: (8 >> cc) & mask. */
1037 cond = TCG_COND_NE;
1038 c->g1 = false;
1039 c->u.s32.a = tcg_const_i32(8);
1040 c->u.s32.b = tcg_const_i32(0);
1041 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
1042 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
1043 break;
1045 break;
1047 default:
1048 abort();
1050 c->cond = cond;
1053 static void free_compare(DisasCompare *c)
1055 if (!c->g1) {
1056 if (c->is_64) {
1057 tcg_temp_free_i64(c->u.s64.a);
1058 } else {
1059 tcg_temp_free_i32(c->u.s32.a);
1062 if (!c->g2) {
1063 if (c->is_64) {
1064 tcg_temp_free_i64(c->u.s64.b);
1065 } else {
1066 tcg_temp_free_i32(c->u.s32.b);
1071 /* ====================================================================== */
1072 /* Define the insn format enumeration. */
1073 #define F0(N) FMT_##N,
1074 #define F1(N, X1) F0(N)
1075 #define F2(N, X1, X2) F0(N)
1076 #define F3(N, X1, X2, X3) F0(N)
1077 #define F4(N, X1, X2, X3, X4) F0(N)
1078 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1079 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
1081 typedef enum {
1082 #include "insn-format.def"
1083 } DisasFormat;
1085 #undef F0
1086 #undef F1
1087 #undef F2
1088 #undef F3
1089 #undef F4
1090 #undef F5
1091 #undef F6
1093 /* This is the way fields are to be accessed out of DisasFields. */
1094 #define have_field(S, F) have_field1((S), FLD_O_##F)
1095 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1097 static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
1099 return (s->fields.presentO >> c) & 1;
1102 static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
1103 enum DisasFieldIndexC c)
1105 assert(have_field1(s, o));
1106 return s->fields.c[c];
1109 /* Describe the layout of each field in each format. */
1110 typedef struct DisasField {
1111 unsigned int beg:8;
1112 unsigned int size:8;
1113 unsigned int type:2;
1114 unsigned int indexC:6;
1115 enum DisasFieldIndexO indexO:8;
1116 } DisasField;
1118 typedef struct DisasFormatInfo {
1119 DisasField op[NUM_C_FIELD];
1120 } DisasFormatInfo;
1122 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1123 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1124 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1125 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1126 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1127 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1128 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1129 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1130 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1131 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1132 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1133 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1134 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1135 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1136 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
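/* Each descriptor is { beg, size, type, indexC, indexO }: e.g. R(1, 8)
   places the 4-bit r1 field at instruction bit 8. */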
1138 #define F0(N) { { } },
1139 #define F1(N, X1) { { X1 } },
1140 #define F2(N, X1, X2) { { X1, X2 } },
1141 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1142 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1143 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1144 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1146 static const DisasFormatInfo format_info[] = {
1147 #include "insn-format.def"
1150 #undef F0
1151 #undef F1
1152 #undef F2
1153 #undef F3
1154 #undef F4
1155 #undef F5
1156 #undef F6
1157 #undef R
1158 #undef M
1159 #undef V
1160 #undef BD
1161 #undef BXD
1162 #undef BDL
1163 #undef BXDL
1164 #undef I
1165 #undef L
1167 /* Generally, we'll extract operands into this structure, operate upon
1168 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1169 of routines below for more details. */
1170 typedef struct {
1171 bool g_out, g_out2, g_in1, g_in2;
1172 TCGv_i64 out, out2, in1, in2;
1173 TCGv_i64 addr1;
1174 } DisasOps;
1176 /* Instructions can place constraints on their operands, raising specification
1177 exceptions if they are violated. To make this easy to automate, each "in1",
1178 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1179 of the following, or 0. To make this easy to document, we'll put the
1180 SPEC_<name> defines next to <name>. */
1182 #define SPEC_r1_even 1
1183 #define SPEC_r2_even 2
1184 #define SPEC_r3_even 4
1185 #define SPEC_r1_f128 8
1186 #define SPEC_r2_f128 16
1188 /* Return values from translate_one, indicating the state of the TB. */
1190 /* We are not using a goto_tb (for whatever reason), but have updated
1191 the PC (for whatever reason), so there's no need to do it again on
1192 exiting the TB. */
1193 #define DISAS_PC_UPDATED DISAS_TARGET_0
1195 /* We have emitted one or more goto_tb. No fixup required. */
1196 #define DISAS_GOTO_TB DISAS_TARGET_1
1198 /* We have updated the PC and CC values. */
1199 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1201 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1202 updated the PC for the next instruction to be executed. */
1203 #define DISAS_PC_STALE DISAS_TARGET_3
1205 /* We are exiting the TB to the main loop. */
1206 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1209 /* Instruction flags */
1210 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1211 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1212 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1213 #define IF_BFP 0x0008 /* binary floating point instruction */
1214 #define IF_DFP 0x0010 /* decimal floating point instruction */
1215 #define IF_PRIV 0x0020 /* privileged instruction */
1216 #define IF_VEC 0x0040 /* vector instruction */
1218 struct DisasInsn {
1219 unsigned opc:16;
1220 unsigned flags:16;
1221 DisasFormat fmt:8;
1222 unsigned fac:8;
1223 unsigned spec:8;
1225 const char *name;
1227 /* Pre-process arguments before HELP_OP. */
1228 void (*help_in1)(DisasContext *, DisasOps *);
1229 void (*help_in2)(DisasContext *, DisasOps *);
1230 void (*help_prep)(DisasContext *, DisasOps *);
1233 * Post-process output after HELP_OP.
1234 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1236 void (*help_wout)(DisasContext *, DisasOps *);
1237 void (*help_cout)(DisasContext *, DisasOps *);
1239 /* Implement the operation itself. */
1240 DisasJumpType (*help_op)(DisasContext *, DisasOps *);
1242 uint64_t data;
1245 /* ====================================================================== */
1246 /* Miscellaneous helpers, used by several operations. */
1248 static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
1250 int b2 = get_field(s, b2);
1251 int d2 = get_field(s, d2);
1253 if (b2 == 0) {
1254 o->in2 = tcg_const_i64(d2 & mask);
1255 } else {
1256 o->in2 = get_address(s, 0, b2, d2);
1257 tcg_gen_andi_i64(o->in2, o->in2, mask);
1261 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1263 if (dest == s->pc_tmp) {
1264 per_branch(s, true);
1265 return DISAS_NEXT;
1267 if (use_goto_tb(s, dest)) {
1268 update_cc_op(s);
1269 per_breaking_event(s);
1270 tcg_gen_goto_tb(0);
1271 tcg_gen_movi_i64(psw_addr, dest);
1272 tcg_gen_exit_tb(s->base.tb, 0);
1273 return DISAS_GOTO_TB;
1274 } else {
1275 tcg_gen_movi_i64(psw_addr, dest);
1276 per_branch(s, false);
1277 return DISAS_PC_UPDATED;
1281 static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
1282 bool is_imm, int imm, TCGv_i64 cdest)
1284 DisasJumpType ret;
1285 uint64_t dest = s->base.pc_next + 2 * imm;
1286 TCGLabel *lab;
1288 /* Take care of the special cases first. */
1289 if (c->cond == TCG_COND_NEVER) {
1290 ret = DISAS_NEXT;
1291 goto egress;
1293 if (is_imm) {
1294 if (dest == s->pc_tmp) {
1295 /* Branch to next. */
1296 per_branch(s, true);
1297 ret = DISAS_NEXT;
1298 goto egress;
1300 if (c->cond == TCG_COND_ALWAYS) {
1301 ret = help_goto_direct(s, dest);
1302 goto egress;
1304 } else {
1305 if (!cdest) {
1306 /* E.g. bcr %r0 -> no branch. */
1307 ret = DISAS_NEXT;
1308 goto egress;
1310 if (c->cond == TCG_COND_ALWAYS) {
1311 tcg_gen_mov_i64(psw_addr, cdest);
1312 per_branch(s, false);
1313 ret = DISAS_PC_UPDATED;
1314 goto egress;
1318 if (use_goto_tb(s, s->pc_tmp)) {
1319 if (is_imm && use_goto_tb(s, dest)) {
1320 /* Both exits can use goto_tb. */
1321 update_cc_op(s);
1323 lab = gen_new_label();
1324 if (c->is_64) {
1325 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1326 } else {
1327 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1330 /* Branch not taken. */
1331 tcg_gen_goto_tb(0);
1332 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1333 tcg_gen_exit_tb(s->base.tb, 0);
1335 /* Branch taken. */
1336 gen_set_label(lab);
1337 per_breaking_event(s);
1338 tcg_gen_goto_tb(1);
1339 tcg_gen_movi_i64(psw_addr, dest);
1340 tcg_gen_exit_tb(s->base.tb, 1);
1342 ret = DISAS_GOTO_TB;
1343 } else {
1344 /* Fallthru can use goto_tb, but taken branch cannot. */
1345 /* Store taken branch destination before the brcond. This
1346 avoids having to allocate a new local temp to hold it.
1347 We'll overwrite this in the not taken case anyway. */
1348 if (!is_imm) {
1349 tcg_gen_mov_i64(psw_addr, cdest);
1352 lab = gen_new_label();
1353 if (c->is_64) {
1354 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1355 } else {
1356 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1359 /* Branch not taken. */
1360 update_cc_op(s);
1361 tcg_gen_goto_tb(0);
1362 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1363 tcg_gen_exit_tb(s->base.tb, 0);
1365 gen_set_label(lab);
1366 if (is_imm) {
1367 tcg_gen_movi_i64(psw_addr, dest);
1369 per_breaking_event(s);
1370 ret = DISAS_PC_UPDATED;
1372 } else {
1373 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1374 Most commonly we're single-stepping or some other condition that
1375 disables all use of goto_tb. Just update the PC and exit. */
1377 TCGv_i64 next = tcg_const_i64(s->pc_tmp);
1378 if (is_imm) {
1379 cdest = tcg_const_i64(dest);
1382 if (c->is_64) {
1383 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1384 cdest, next);
1385 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1386 } else {
1387 TCGv_i32 t0 = tcg_temp_new_i32();
1388 TCGv_i64 t1 = tcg_temp_new_i64();
1389 TCGv_i64 z = tcg_const_i64(0);
1390 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1391 tcg_gen_extu_i32_i64(t1, t0);
1392 tcg_temp_free_i32(t0);
1393 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1394 per_branch_cond(s, TCG_COND_NE, t1, z);
1395 tcg_temp_free_i64(t1);
1396 tcg_temp_free_i64(z);
1399 if (is_imm) {
1400 tcg_temp_free_i64(cdest);
1402 tcg_temp_free_i64(next);
1404 ret = DISAS_PC_UPDATED;
1407 egress:
1408 free_compare(c);
1409 return ret;
1412 /* ====================================================================== */
1413 /* The operations. These perform the bulk of the work for any insn,
1414 usually after the operands have been loaded and output initialized. */
1416 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1418 tcg_gen_abs_i64(o->out, o->in2);
1419 return DISAS_NEXT;
1422 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1424 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1425 return DISAS_NEXT;
1428 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1430 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1431 return DISAS_NEXT;
1434 static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
1436 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1437 tcg_gen_mov_i64(o->out2, o->in2);
1438 return DISAS_NEXT;
1441 static DisasJumpType op_add(DisasContext *s, DisasOps *o)
1443 tcg_gen_add_i64(o->out, o->in1, o->in2);
1444 return DISAS_NEXT;
1447 static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
1449 DisasCompare cmp;
1450 TCGv_i64 carry;
1452 tcg_gen_add_i64(o->out, o->in1, o->in2);
1454 /* The carry flag is the msb of CC, therefore the branch mask that would
1455 create that comparison is 3. Feeding the generated comparison to
1456 setcond produces the carry flag that we desire. */
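/* (Mask 3 selects CC 2 and CC 3, the two CC values produced by an
   unsigned add when a carry occurred.) */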
1457 disas_jcc(s, &cmp, 3);
1458 carry = tcg_temp_new_i64();
1459 if (cmp.is_64) {
1460 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1461 } else {
1462 TCGv_i32 t = tcg_temp_new_i32();
1463 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1464 tcg_gen_extu_i32_i64(carry, t);
1465 tcg_temp_free_i32(t);
1467 free_compare(&cmp);
1469 tcg_gen_add_i64(o->out, o->out, carry);
1470 tcg_temp_free_i64(carry);
1471 return DISAS_NEXT;
1474 static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
1476 o->in1 = tcg_temp_new_i64();
1478 if (!s390_has_feat(S390_FEAT_STFLE_45)) {
1479 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1480 } else {
1481 /* Perform the atomic addition in memory. */
1482 tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1483 s->insn->data);
1486 /* Recompute also for atomic case: needed for setting CC. */
1487 tcg_gen_add_i64(o->out, o->in1, o->in2);
1489 if (!s390_has_feat(S390_FEAT_STFLE_45)) {
1490 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1492 return DISAS_NEXT;
1495 static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
1497 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1498 return DISAS_NEXT;
1501 static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
1503 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1504 return DISAS_NEXT;
1507 static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
1509 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1510 return_low128(o->out2);
1511 return DISAS_NEXT;
1514 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1516 tcg_gen_and_i64(o->out, o->in1, o->in2);
1517 return DISAS_NEXT;
1520 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1522 int shift = s->insn->data & 0xff;
1523 int size = s->insn->data >> 8;
1524 uint64_t mask = ((1ull << size) - 1) << shift;
1526 assert(!o->g_in2);
1527 tcg_gen_shli_i64(o->in2, o->in2, shift);
1528 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1529 tcg_gen_and_i64(o->out, o->in1, o->in2);
1531 /* Produce the CC from only the bits manipulated. */
1532 tcg_gen_andi_i64(cc_dst, o->out, mask);
1533 set_cc_nz_u64(s, cc_dst);
1534 return DISAS_NEXT;
1537 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1539 o->in1 = tcg_temp_new_i64();
1541 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1542 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1543 } else {
1544 /* Perform the atomic operation in memory. */
1545 tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1546 s->insn->data);
1549 /* Recompute also for atomic case: needed for setting CC. */
1550 tcg_gen_and_i64(o->out, o->in1, o->in2);
1552 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1553 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1555 return DISAS_NEXT;
1558 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1560 pc_to_link_info(o->out, s, s->pc_tmp);
1561 if (o->in2) {
1562 tcg_gen_mov_i64(psw_addr, o->in2);
1563 per_branch(s, false);
1564 return DISAS_PC_UPDATED;
1565 } else {
1566 return DISAS_NEXT;
1570 static void save_link_info(DisasContext *s, DisasOps *o)
1572 TCGv_i64 t;
1574 if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1575 pc_to_link_info(o->out, s, s->pc_tmp);
1576 return;
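/* 24-bit mode: the low word of the link is ILC (2 bits), CC (2 bits),
   the program mask (4 bits), then the 24-bit instruction address. */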
1578 gen_op_calc_cc(s);
1579 tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1580 tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1581 t = tcg_temp_new_i64();
1582 tcg_gen_shri_i64(t, psw_mask, 16);
1583 tcg_gen_andi_i64(t, t, 0x0f000000);
1584 tcg_gen_or_i64(o->out, o->out, t);
1585 tcg_gen_extu_i32_i64(t, cc_op);
1586 tcg_gen_shli_i64(t, t, 28);
1587 tcg_gen_or_i64(o->out, o->out, t);
1588 tcg_temp_free_i64(t);
1591 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1593 save_link_info(s, o);
1594 if (o->in2) {
1595 tcg_gen_mov_i64(psw_addr, o->in2);
1596 per_branch(s, false);
1597 return DISAS_PC_UPDATED;
1598 } else {
1599 return DISAS_NEXT;
1603 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1605 pc_to_link_info(o->out, s, s->pc_tmp);
1606 return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
1609 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1611 int m1 = get_field(s, m1);
1612 bool is_imm = have_field(s, i2);
1613 int imm = is_imm ? get_field(s, i2) : 0;
1614 DisasCompare c;
1616 /* BCR with R2 = 0 causes no branching */
1617 if (have_field(s, r2) && get_field(s, r2) == 0) {
1618 if (m1 == 14) {
1619 /* Perform serialization */
1620 /* FIXME: check for fast-BCR-serialization facility */
1621 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1623 if (m1 == 15) {
1624 /* Perform serialization */
1625 /* FIXME: perform checkpoint-synchronisation */
1626 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1628 return DISAS_NEXT;
1631 disas_jcc(s, &c, m1);
1632 return help_branch(s, &c, is_imm, imm, o->in2);
1635 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1637 int r1 = get_field(s, r1);
1638 bool is_imm = have_field(s, i2);
1639 int imm = is_imm ? get_field(s, i2) : 0;
1640 DisasCompare c;
1641 TCGv_i64 t;
1643 c.cond = TCG_COND_NE;
1644 c.is_64 = false;
1645 c.g1 = false;
1646 c.g2 = false;
1648 t = tcg_temp_new_i64();
1649 tcg_gen_subi_i64(t, regs[r1], 1);
1650 store_reg32_i64(r1, t);
1651 c.u.s32.a = tcg_temp_new_i32();
1652 c.u.s32.b = tcg_const_i32(0);
1653 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1654 tcg_temp_free_i64(t);
1656 return help_branch(s, &c, is_imm, imm, o->in2);
1659 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1661 int r1 = get_field(s, r1);
1662 int imm = get_field(s, i2);
1663 DisasCompare c;
1664 TCGv_i64 t;
1666 c.cond = TCG_COND_NE;
1667 c.is_64 = false;
1668 c.g1 = false;
1669 c.g2 = false;
1671 t = tcg_temp_new_i64();
1672 tcg_gen_shri_i64(t, regs[r1], 32);
1673 tcg_gen_subi_i64(t, t, 1);
1674 store_reg32h_i64(r1, t);
1675 c.u.s32.a = tcg_temp_new_i32();
1676 c.u.s32.b = tcg_const_i32(0);
1677 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1678 tcg_temp_free_i64(t);
1680 return help_branch(s, &c, 1, imm, o->in2);
1683 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1685 int r1 = get_field(s, r1);
1686 bool is_imm = have_field(s, i2);
1687 int imm = is_imm ? get_field(s, i2) : 0;
1688 DisasCompare c;
1690 c.cond = TCG_COND_NE;
1691 c.is_64 = true;
1692 c.g1 = true;
1693 c.g2 = false;
1695 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1696 c.u.s64.a = regs[r1];
1697 c.u.s64.b = tcg_const_i64(0);
1699 return help_branch(s, &c, is_imm, imm, o->in2);
1702 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1704 int r1 = get_field(s, r1);
1705 int r3 = get_field(s, r3);
1706 bool is_imm = have_field(s, i2);
1707 int imm = is_imm ? get_field(s, i2) : 0;
1708 DisasCompare c;
1709 TCGv_i64 t;
1711 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1712 c.is_64 = false;
1713 c.g1 = false;
1714 c.g2 = false;
1716 t = tcg_temp_new_i64();
1717 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1718 c.u.s32.a = tcg_temp_new_i32();
1719 c.u.s32.b = tcg_temp_new_i32();
1720 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1721 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1722 store_reg32_i64(r1, t);
1723 tcg_temp_free_i64(t);
1725 return help_branch(s, &c, is_imm, imm, o->in2);
1728 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1730 int r1 = get_field(s, r1);
1731 int r3 = get_field(s, r3);
1732 bool is_imm = have_field(s, i2);
1733 int imm = is_imm ? get_field(s, i2) : 0;
1734 DisasCompare c;
1736 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1737 c.is_64 = true;
1739 if (r1 == (r3 | 1)) {
1740 c.u.s64.b = load_reg(r3 | 1);
1741 c.g2 = false;
1742 } else {
1743 c.u.s64.b = regs[r3 | 1];
1744 c.g2 = true;
1747 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1748 c.u.s64.a = regs[r1];
1749 c.g1 = true;
1751 return help_branch(s, &c, is_imm, imm, o->in2);
1754 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1756 int imm, m3 = get_field(s, m3);
1757 bool is_imm;
1758 DisasCompare c;
1760 c.cond = ltgt_cond[m3];
1761 if (s->insn->data) {
1762 c.cond = tcg_unsigned_cond(c.cond);
1764 c.is_64 = c.g1 = c.g2 = true;
1765 c.u.s64.a = o->in1;
1766 c.u.s64.b = o->in2;
1768 is_imm = have_field(s, i4);
1769 if (is_imm) {
1770 imm = get_field(s, i4);
1771 } else {
1772 imm = 0;
1773 o->out = get_address(s, 0, get_field(s, b4),
1774 get_field(s, d4));
1777 return help_branch(s, &c, is_imm, imm, o->out);
1780 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1782 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1783 set_cc_static(s);
1784 return DISAS_NEXT;
1787 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1789 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1790 set_cc_static(s);
1791 return DISAS_NEXT;
1794 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1796 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1797 set_cc_static(s);
1798 return DISAS_NEXT;
1801 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1802 bool m4_with_fpe)
1804 const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1805 uint8_t m3 = get_field(s, m3);
1806 uint8_t m4 = get_field(s, m4);
1808 /* m3 field was introduced with FPE */
1809 if (!fpe && m3_with_fpe) {
1810 m3 = 0;
1812 /* m4 field was introduced with FPE */
1813 if (!fpe && m4_with_fpe) {
1814 m4 = 0;
1817 /* Check for valid rounding modes. Mode 3 was introduced later. */
1818 if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1819 gen_program_exception(s, PGM_SPECIFICATION);
1820 return NULL;
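/* Pack m3 into bits 0-3 and m4 into bits 4-7 for the FP helpers. */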
1823 return tcg_const_i32(deposit32(m3, 4, 4, m4));
1826 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1828 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1830 if (!m34) {
1831 return DISAS_NORETURN;
1833 gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1834 tcg_temp_free_i32(m34);
1835 gen_set_cc_nz_f32(s, o->in2);
1836 return DISAS_NEXT;
1839 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1841 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1843 if (!m34) {
1844 return DISAS_NORETURN;
1846 gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1847 tcg_temp_free_i32(m34);
1848 gen_set_cc_nz_f64(s, o->in2);
1849 return DISAS_NEXT;
1852 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1854 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1856 if (!m34) {
1857 return DISAS_NORETURN;
1859 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
1860 tcg_temp_free_i32(m34);
1861 gen_set_cc_nz_f128(s, o->in1, o->in2);
1862 return DISAS_NEXT;
1865 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1867 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1869 if (!m34) {
1870 return DISAS_NORETURN;
1872 gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1873 tcg_temp_free_i32(m34);
1874 gen_set_cc_nz_f32(s, o->in2);
1875 return DISAS_NEXT;
1878 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1880 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1882 if (!m34) {
1883 return DISAS_NORETURN;
1885 gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1886 tcg_temp_free_i32(m34);
1887 gen_set_cc_nz_f64(s, o->in2);
1888 return DISAS_NEXT;
1891 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1893 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1895 if (!m34) {
1896 return DISAS_NORETURN;
1898 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
1899 tcg_temp_free_i32(m34);
1900 gen_set_cc_nz_f128(s, o->in1, o->in2);
1901 return DISAS_NEXT;
1904 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1906 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1908 if (!m34) {
1909 return DISAS_NORETURN;
1911 gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1912 tcg_temp_free_i32(m34);
1913 gen_set_cc_nz_f32(s, o->in2);
1914 return DISAS_NEXT;
1917 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1919 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1921 if (!m34) {
1922 return DISAS_NORETURN;
1924 gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1925 tcg_temp_free_i32(m34);
1926 gen_set_cc_nz_f64(s, o->in2);
1927 return DISAS_NEXT;
1930 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1932 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1934 if (!m34) {
1935 return DISAS_NORETURN;
1937 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
1938 tcg_temp_free_i32(m34);
1939 gen_set_cc_nz_f128(s, o->in1, o->in2);
1940 return DISAS_NEXT;
1943 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1945 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1947 if (!m34) {
1948 return DISAS_NORETURN;
1950 gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1951 tcg_temp_free_i32(m34);
1952 gen_set_cc_nz_f32(s, o->in2);
1953 return DISAS_NEXT;
1956 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1958 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1960 if (!m34) {
1961 return DISAS_NORETURN;
1963 gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1964 tcg_temp_free_i32(m34);
1965 gen_set_cc_nz_f64(s, o->in2);
1966 return DISAS_NEXT;
1969 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1971 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1973 if (!m34) {
1974 return DISAS_NORETURN;
1976 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
1977 tcg_temp_free_i32(m34);
1978 gen_set_cc_nz_f128(s, o->in1, o->in2);
1979 return DISAS_NEXT;
1982 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1984 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1986 if (!m34) {
1987 return DISAS_NORETURN;
1989 gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1990 tcg_temp_free_i32(m34);
1991 return DISAS_NEXT;
1994 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1996 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1998 if (!m34) {
1999 return DISAS_NORETURN;
2001 gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
2002 tcg_temp_free_i32(m34);
2003 return DISAS_NEXT;
2006 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
2008 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2010 if (!m34) {
2011 return DISAS_NORETURN;
2013 gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
2014 tcg_temp_free_i32(m34);
2015 return_low128(o->out2);
2016 return DISAS_NEXT;
2019 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2021 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2023 if (!m34) {
2024 return DISAS_NORETURN;
2026 gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2027 tcg_temp_free_i32(m34);
2028 return DISAS_NEXT;
2031 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
2033 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2035 if (!m34) {
2036 return DISAS_NORETURN;
2038 gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2039 tcg_temp_free_i32(m34);
2040 return DISAS_NEXT;
2043 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2045 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2047 if (!m34) {
2048 return DISAS_NORETURN;
2050 gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
2051 tcg_temp_free_i32(m34);
2052 return_low128(o->out2);
2053 return DISAS_NEXT;
2056 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2058 int r2 = get_field(s, r2);
2059 TCGv_i64 len = tcg_temp_new_i64();
2061 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2062 set_cc_static(s);
2063 return_low128(o->out);
2065 tcg_gen_add_i64(regs[r2], regs[r2], len);
2066 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2067 tcg_temp_free_i64(len);
2069 return DISAS_NEXT;
2072 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2074 int l = get_field(s, l1);
2075 TCGv_i32 vl;
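/* The L1 field holds the operand length minus one, hence l + 1 below. */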
2077 switch (l + 1) {
2078 case 1:
2079 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2080 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2081 break;
2082 case 2:
2083 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2084 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2085 break;
2086 case 4:
2087 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2088 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2089 break;
2090 case 8:
2091 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2092 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2093 break;
2094 default:
2095 vl = tcg_const_i32(l);
2096 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2097 tcg_temp_free_i32(vl);
2098 set_cc_static(s);
2099 return DISAS_NEXT;
2101 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2102 return DISAS_NEXT;
2105 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2107 int r1 = get_field(s, r1);
2108 int r2 = get_field(s, r2);
2109 TCGv_i32 t1, t2;
2111 /* r1 and r2 must be even. */
2112 if (r1 & 1 || r2 & 1) {
2113 gen_program_exception(s, PGM_SPECIFICATION);
2114 return DISAS_NORETURN;
2117 t1 = tcg_const_i32(r1);
2118 t2 = tcg_const_i32(r2);
2119 gen_helper_clcl(cc_op, cpu_env, t1, t2);
2120 tcg_temp_free_i32(t1);
2121 tcg_temp_free_i32(t2);
2122 set_cc_static(s);
2123 return DISAS_NEXT;
2126 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2128 int r1 = get_field(s, r1);
2129 int r3 = get_field(s, r3);
2130 TCGv_i32 t1, t3;
2132 /* r1 and r3 must be even. */
2133 if (r1 & 1 || r3 & 1) {
2134 gen_program_exception(s, PGM_SPECIFICATION);
2135 return DISAS_NORETURN;
2138 t1 = tcg_const_i32(r1);
2139 t3 = tcg_const_i32(r3);
2140 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2141 tcg_temp_free_i32(t1);
2142 tcg_temp_free_i32(t3);
2143 set_cc_static(s);
2144 return DISAS_NEXT;
2147 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2149 int r1 = get_field(s, r1);
2150 int r3 = get_field(s, r3);
2151 TCGv_i32 t1, t3;
2153 /* r1 and r3 must be even. */
2154 if (r1 & 1 || r3 & 1) {
2155 gen_program_exception(s, PGM_SPECIFICATION);
2156 return DISAS_NORETURN;
2159 t1 = tcg_const_i32(r1);
2160 t3 = tcg_const_i32(r3);
2161 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2162 tcg_temp_free_i32(t1);
2163 tcg_temp_free_i32(t3);
2164 set_cc_static(s);
2165 return DISAS_NEXT;
2168 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2170 TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
2171 TCGv_i32 t1 = tcg_temp_new_i32();
2172 tcg_gen_extrl_i64_i32(t1, o->in1);
2173 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2174 set_cc_static(s);
2175 tcg_temp_free_i32(t1);
2176 tcg_temp_free_i32(m3);
2177 return DISAS_NEXT;
2180 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2182 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2183 set_cc_static(s);
2184 return_low128(o->in2);
2185 return DISAS_NEXT;
2188 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2190 TCGv_i64 t = tcg_temp_new_i64();
2191 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2192 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2193 tcg_gen_or_i64(o->out, o->out, t);
2194 tcg_temp_free_i64(t);
2195 return DISAS_NEXT;
2198 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2200 int d2 = get_field(s, d2);
2201 int b2 = get_field(s, b2);
2202 TCGv_i64 addr, cc;
2204 /* Note that in1 = R3 (new value) and
2205 in2 = (zero-extended) R1 (expected value). */
2207 addr = get_address(s, 0, b2, d2);
2208 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2209 get_mem_index(s), s->insn->data | MO_ALIGN);
2210 tcg_temp_free_i64(addr);
2212 /* Are the memory and expected values (un)equal? Note that this setcond
2213 produces the output CC value, thus the NE sense of the test. */
2214 cc = tcg_temp_new_i64();
2215 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2216 tcg_gen_extrl_i64_i32(cc_op, cc);
2217 tcg_temp_free_i64(cc);
2218 set_cc_static(s);
2220 return DISAS_NEXT;
2223 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2225 int r1 = get_field(s, r1);
2226 int r3 = get_field(s, r3);
2227 int d2 = get_field(s, d2);
2228 int b2 = get_field(s, b2);
2229 DisasJumpType ret = DISAS_NEXT;
2230 TCGv_i64 addr;
2231 TCGv_i32 t_r1, t_r3;
2233 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2234 addr = get_address(s, 0, b2, d2);
2235 t_r1 = tcg_const_i32(r1);
2236 t_r3 = tcg_const_i32(r3);
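/* Dispatch note: single-threaded TCG can use the non-atomic helper;
 * truly parallel CPUs need a 16-byte cmpxchg, and when the host
 * lacks one (!HAVE_CMPXCHG128) we exit to the EXCP_ATOMIC slow
 * path, which stops all CPUs and retries the insn serially. */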
2237 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2238 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2239 } else if (HAVE_CMPXCHG128) {
2240 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2241 } else {
2242 gen_helper_exit_atomic(cpu_env);
2243 ret = DISAS_NORETURN;
2245 tcg_temp_free_i64(addr);
2246 tcg_temp_free_i32(t_r1);
2247 tcg_temp_free_i32(t_r3);
2249 set_cc_static(s);
2250 return ret;
2253 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2255 int r3 = get_field(s, r3);
2256 TCGv_i32 t_r3 = tcg_const_i32(r3);
2258 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2259 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2260 } else {
2261 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2263 tcg_temp_free_i32(t_r3);
2265 set_cc_static(s);
2266 return DISAS_NEXT;
2269 #ifndef CONFIG_USER_ONLY
2270 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2272 MemOp mop = s->insn->data;
2273 TCGv_i64 addr, old, cc;
2274 TCGLabel *lab = gen_new_label();
2276 /* Note that in1 = R1 (zero-extended expected value),
2277 out = R1 (original reg), out2 = R1+1 (new value). */
2279 addr = tcg_temp_new_i64();
2280 old = tcg_temp_new_i64();
2281 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2282 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2283 get_mem_index(s), mop | MO_ALIGN);
2284 tcg_temp_free_i64(addr);
2286 /* Are the memory and expected values (un)equal? */
2287 cc = tcg_temp_new_i64();
2288 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2289 tcg_gen_extrl_i64_i32(cc_op, cc);
2291 /* Write back the output now, so that it happens before the
2292 following branch, so that we don't need local temps. */
2293 if ((mop & MO_SIZE) == MO_32) {
2294 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2295 } else {
2296 tcg_gen_mov_i64(o->out, old);
2298 tcg_temp_free_i64(old);
2300 /* If the comparison was equal, and the LSB of R2 was set,
2301 then we need to flush the TLB (for all cpus). */
2302 tcg_gen_xori_i64(cc, cc, 1);
2303 tcg_gen_and_i64(cc, cc, o->in2);
2304 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2305 tcg_temp_free_i64(cc);
2307 gen_helper_purge(cpu_env);
2308 gen_set_label(lab);
2310 return DISAS_NEXT;
2312 #endif
2314 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2316 TCGv_i64 t1 = tcg_temp_new_i64();
2317 TCGv_i32 t2 = tcg_temp_new_i32();
2318 tcg_gen_extrl_i64_i32(t2, o->in1);
2319 gen_helper_cvd(t1, t2);
2320 tcg_temp_free_i32(t2);
2321 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2322 tcg_temp_free_i64(t1);
2323 return DISAS_NEXT;
2326 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2328 int m3 = get_field(s, m3);
2329 TCGLabel *lab = gen_new_label();
2330 TCGCond c;
2332 c = tcg_invert_cond(ltgt_cond[m3]);
2333 if (s->insn->data) {
2334 c = tcg_unsigned_cond(c);
2336 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2338 /* Trap. */
2339 gen_trap(s);
2341 gen_set_label(lab);
2342 return DISAS_NEXT;
2345 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2347 int m3 = get_field(s, m3);
2348 int r1 = get_field(s, r1);
2349 int r2 = get_field(s, r2);
2350 TCGv_i32 tr1, tr2, chk;
2352 /* R1 and R2 must both be even. */
2353 if ((r1 | r2) & 1) {
2354 gen_program_exception(s, PGM_SPECIFICATION);
2355 return DISAS_NORETURN;
2357 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2358 m3 = 0;
2361 tr1 = tcg_const_i32(r1);
2362 tr2 = tcg_const_i32(r2);
2363 chk = tcg_const_i32(m3);
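/* insn->data names the conversion by two digits, source then
 * destination, with 1 = UTF-8, 2 = UTF-16, 4 = UTF-32; e.g.
 * data == 21 is CU21, converting UTF-16 to UTF-8. */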
2365 switch (s->insn->data) {
2366 case 12:
2367 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2368 break;
2369 case 14:
2370 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2371 break;
2372 case 21:
2373 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2374 break;
2375 case 24:
2376 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2377 break;
2378 case 41:
2379 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2380 break;
2381 case 42:
2382 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2383 break;
2384 default:
2385 g_assert_not_reached();
2388 tcg_temp_free_i32(tr1);
2389 tcg_temp_free_i32(tr2);
2390 tcg_temp_free_i32(chk);
2391 set_cc_static(s);
2392 return DISAS_NEXT;
2395 #ifndef CONFIG_USER_ONLY
2396 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2398 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2399 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2400 TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2402 gen_helper_diag(cpu_env, r1, r3, func_code);
2404 tcg_temp_free_i32(func_code);
2405 tcg_temp_free_i32(r3);
2406 tcg_temp_free_i32(r1);
2407 return DISAS_NEXT;
2409 #endif
2411 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2413 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2414 return_low128(o->out);
2415 return DISAS_NEXT;
2418 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2420 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2421 return_low128(o->out);
2422 return DISAS_NEXT;
2425 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2427 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2428 return_low128(o->out);
2429 return DISAS_NEXT;
2432 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2434 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2435 return_low128(o->out);
2436 return DISAS_NEXT;
2439 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2441 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2442 return DISAS_NEXT;
2445 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2447 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2448 return DISAS_NEXT;
2451 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2453 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2454 return_low128(o->out2);
2455 return DISAS_NEXT;
2458 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2460 int r2 = get_field(s, r2);
2461 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2462 return DISAS_NEXT;
2465 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2467 /* No cache information provided. */
2468 tcg_gen_movi_i64(o->out, -1);
2469 return DISAS_NEXT;
2472 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2474 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2475 return DISAS_NEXT;
2478 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2480 int r1 = get_field(s, r1);
2481 int r2 = get_field(s, r2);
2482 TCGv_i64 t = tcg_temp_new_i64();
2484 /* Note the "subsequently" in the PoO, which implies a defined result
2485 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2486 tcg_gen_shri_i64(t, psw_mask, 32);
2487 store_reg32_i64(r1, t);
2488 if (r2 != 0) {
2489 store_reg32_i64(r2, psw_mask);
2492 tcg_temp_free_i64(t);
2493 return DISAS_NEXT;
2496 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2498 int r1 = get_field(s, r1);
2499 TCGv_i32 ilen;
2500 TCGv_i64 v1;
2502 /* Nested EXECUTE is not allowed. */
2503 if (unlikely(s->ex_value)) {
2504 gen_program_exception(s, PGM_EXECUTE);
2505 return DISAS_NORETURN;
2508 update_psw_addr(s);
2509 update_cc_op(s);
2511 if (r1 == 0) {
2512 v1 = tcg_const_i64(0);
2513 } else {
2514 v1 = regs[r1];
2517 ilen = tcg_const_i32(s->ilen);
2518 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2519 tcg_temp_free_i32(ilen);
2521 if (r1 == 0) {
2522 tcg_temp_free_i64(v1);
2525 return DISAS_PC_CC_UPDATED;
2528 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2530 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2532 if (!m34) {
2533 return DISAS_NORETURN;
2535 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2536 tcg_temp_free_i32(m34);
2537 return DISAS_NEXT;
2540 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2542 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2544 if (!m34) {
2545 return DISAS_NORETURN;
2547 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2548 tcg_temp_free_i32(m34);
2549 return DISAS_NEXT;
2552 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2554 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2556 if (!m34) {
2557 return DISAS_NORETURN;
2559 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2560 return_low128(o->out2);
2561 tcg_temp_free_i32(m34);
2562 return DISAS_NEXT;
2565 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2567 /* We'll use the original input for cc computation, since we get to
2568 compare that against 0, which ought to be better than comparing
2569 the real output against 64. It also lets cc_dst be a convenient
2570 temporary during our computation. */
2571 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2573 /* R1 = IN ? CLZ(IN) : 64. */
2574 tcg_gen_clzi_i64(o->out, o->in2, 64);
2576 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2577 value by 64, which is undefined. But since the shift is 64 iff the
2578 input is zero, we still get the correct result after and'ing. */
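/* Worked example: IN = 0x0040000000000000 gives R1 = clz = 9;
 * 0x8000000000000000 >> 9 recovers the found bit, so R1+1 = 0.
 * For IN = 0 the shift count is the undefined 64, but cc_dst
 * still holds the zero input, so the andc forces R1+1 = 0. */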
2579 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2580 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2581 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2582 return DISAS_NEXT;
2585 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2587 int m3 = get_field(s, m3);
2588 int pos, len, base = s->insn->data;
2589 TCGv_i64 tmp = tcg_temp_new_i64();
2590 uint64_t ccm;
2592 switch (m3) {
2593 case 0xf:
2594 /* Effectively a 32-bit load. */
2595 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2596 len = 32;
2597 goto one_insert;
2599 case 0xc:
2600 case 0x6:
2601 case 0x3:
2602 /* Effectively a 16-bit load. */
2603 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2604 len = 16;
2605 goto one_insert;
2607 case 0x8:
2608 case 0x4:
2609 case 0x2:
2610 case 0x1:
2611 /* Effectively an 8-bit load. */
2612 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2613 len = 8;
2614 goto one_insert;
2616 one_insert:
2617 pos = base + ctz32(m3) * 8;
2618 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2619 ccm = ((1ull << len) - 1) << pos;
2620 break;
2622 default:
2623 /* This is going to be a sequence of loads and inserts. */
2624 pos = base + 32 - 8;
2625 ccm = 0;
2626 while (m3) {
2627 if (m3 & 0x8) {
2628 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2629 tcg_gen_addi_i64(o->in2, o->in2, 1);
2630 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2631 ccm |= 0xffull << pos;
2633 m3 = (m3 << 1) & 0xf;
2634 pos -= 8;
2636 break;
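/* E.g. ICM with m3 = 0x5 (binary 0101) loads two successive bytes
 * into bit positions base+16 and base+0 of the destination, and
 * ccm selects exactly those two bytes for the CC computation. */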
2639 tcg_gen_movi_i64(tmp, ccm);
2640 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2641 tcg_temp_free_i64(tmp);
2642 return DISAS_NEXT;
2645 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2647 int shift = s->insn->data & 0xff;
2648 int size = s->insn->data >> 8;
2649 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2650 return DISAS_NEXT;
2653 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2655 TCGv_i64 t1, t2;
2657 gen_op_calc_cc(s);
2658 t1 = tcg_temp_new_i64();
2659 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2660 t2 = tcg_temp_new_i64();
2661 tcg_gen_extu_i32_i64(t2, cc_op);
2662 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2663 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2664 tcg_temp_free_i64(t1);
2665 tcg_temp_free_i64(t2);
2666 return DISAS_NEXT;
2669 #ifndef CONFIG_USER_ONLY
2670 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2672 TCGv_i32 m4;
2674 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2675 m4 = tcg_const_i32(get_field(s, m4));
2676 } else {
2677 m4 = tcg_const_i32(0);
2679 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2680 tcg_temp_free_i32(m4);
2681 return DISAS_NEXT;
2684 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2686 TCGv_i32 m4;
2688 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2689 m4 = tcg_const_i32(get_field(s, m4));
2690 } else {
2691 m4 = tcg_const_i32(0);
2693 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2694 tcg_temp_free_i32(m4);
2695 return DISAS_NEXT;
2698 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2700 gen_helper_iske(o->out, cpu_env, o->in2);
2701 return DISAS_NEXT;
2703 #endif
2705 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2707 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2708 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2709 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2710 TCGv_i32 t_r1, t_r2, t_r3, type;
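/* The specification checks below deliberately fall through: each
 * function type inherits the weaker constraints of the cases that
 * follow it, so e.g. KMA requires r3 distinct from r1/r2 plus all
 * of the even/nonzero register checks shared with KMCTR and KM. */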
2712 switch (s->insn->data) {
2713 case S390_FEAT_TYPE_KMA:
2714 if (r3 == r1 || r3 == r2) {
2715 gen_program_exception(s, PGM_SPECIFICATION);
2716 return DISAS_NORETURN;
2718 /* FALL THROUGH */
2719 case S390_FEAT_TYPE_KMCTR:
2720 if (r3 & 1 || !r3) {
2721 gen_program_exception(s, PGM_SPECIFICATION);
2722 return DISAS_NORETURN;
2724 /* FALL THROUGH */
2725 case S390_FEAT_TYPE_PPNO:
2726 case S390_FEAT_TYPE_KMF:
2727 case S390_FEAT_TYPE_KMC:
2728 case S390_FEAT_TYPE_KMO:
2729 case S390_FEAT_TYPE_KM:
2730 if (r1 & 1 || !r1) {
2731 gen_program_exception(s, PGM_SPECIFICATION);
2732 return DISAS_NORETURN;
2734 /* FALL THROUGH */
2735 case S390_FEAT_TYPE_KMAC:
2736 case S390_FEAT_TYPE_KIMD:
2737 case S390_FEAT_TYPE_KLMD:
2738 if (r2 & 1 || !r2) {
2739 gen_program_exception(s, PGM_SPECIFICATION);
2740 return DISAS_NORETURN;
2742 /* FALL THROUGH */
2743 case S390_FEAT_TYPE_PCKMO:
2744 case S390_FEAT_TYPE_PCC:
2745 break;
2746 default:
2747 g_assert_not_reached();
2750 t_r1 = tcg_const_i32(r1);
2751 t_r2 = tcg_const_i32(r2);
2752 t_r3 = tcg_const_i32(r3);
2753 type = tcg_const_i32(s->insn->data);
2754 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2755 set_cc_static(s);
2756 tcg_temp_free_i32(t_r1);
2757 tcg_temp_free_i32(t_r2);
2758 tcg_temp_free_i32(t_r3);
2759 tcg_temp_free_i32(type);
2760 return DISAS_NEXT;
2763 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2765 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2766 set_cc_static(s);
2767 return DISAS_NEXT;
2770 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2772 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2773 set_cc_static(s);
2774 return DISAS_NEXT;
2777 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2779 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2780 set_cc_static(s);
2781 return DISAS_NEXT;
2784 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2786 /* The real output is the original value in memory, as returned
2787 by the atomic fetch-add below. */
2788 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2789 s->insn->data | MO_ALIGN);
2790 /* However, we need to recompute the addition for setting CC. */
2791 tcg_gen_add_i64(o->out, o->in1, o->in2);
2792 return DISAS_NEXT;
2795 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2797 /* The real output is the original value in memory, as returned
2798 by the atomic fetch-and below. */
2799 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2800 s->insn->data | MO_ALIGN);
2801 /* However, we need to recompute the operation for setting CC. */
2802 tcg_gen_and_i64(o->out, o->in1, o->in2);
2803 return DISAS_NEXT;
2806 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2808 /* The real output is the original value in memory, as returned
2809 by the atomic fetch-or below. */
2810 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2811 s->insn->data | MO_ALIGN);
2812 /* However, we need to recompute the operation for setting CC. */
2813 tcg_gen_or_i64(o->out, o->in1, o->in2);
2814 return DISAS_NEXT;
2817 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2819 /* The real output is the original value in memory, as returned
2820 by the atomic fetch-xor below. */
2821 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2822 s->insn->data | MO_ALIGN);
2823 /* However, we need to recompute the operation for setting CC. */
2824 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2825 return DISAS_NEXT;
2828 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2830 gen_helper_ldeb(o->out, cpu_env, o->in2);
2831 return DISAS_NEXT;
2834 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2836 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2838 if (!m34) {
2839 return DISAS_NORETURN;
2841 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2842 tcg_temp_free_i32(m34);
2843 return DISAS_NEXT;
2846 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2848 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2850 if (!m34) {
2851 return DISAS_NORETURN;
2853 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2854 tcg_temp_free_i32(m34);
2855 return DISAS_NEXT;
2858 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2860 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2862 if (!m34) {
2863 return DISAS_NORETURN;
2865 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2866 tcg_temp_free_i32(m34);
2867 return DISAS_NEXT;
2870 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2872 gen_helper_lxdb(o->out, cpu_env, o->in2);
2873 return_low128(o->out2);
2874 return DISAS_NEXT;
2877 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2879 gen_helper_lxeb(o->out, cpu_env, o->in2);
2880 return_low128(o->out2);
2881 return DISAS_NEXT;
2884 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2886 tcg_gen_shli_i64(o->out, o->in2, 32);
2887 return DISAS_NEXT;
2890 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2892 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2893 return DISAS_NEXT;
2896 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2898 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2899 return DISAS_NEXT;
2902 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2904 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2905 return DISAS_NEXT;
2908 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2910 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2911 return DISAS_NEXT;
2914 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2916 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2917 return DISAS_NEXT;
2920 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2922 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2923 return DISAS_NEXT;
2926 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2928 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2929 return DISAS_NEXT;
2932 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2934 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2935 return DISAS_NEXT;
2938 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2940 TCGLabel *lab = gen_new_label();
2941 store_reg32_i64(get_field(s, r1), o->in2);
2942 /* The value is stored even in case of trap. */
2943 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2944 gen_trap(s);
2945 gen_set_label(lab);
2946 return DISAS_NEXT;
2949 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2951 TCGLabel *lab = gen_new_label();
2952 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2953 /* The value is stored even in case of trap. */
2954 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2955 gen_trap(s);
2956 gen_set_label(lab);
2957 return DISAS_NEXT;
2960 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2962 TCGLabel *lab = gen_new_label();
2963 store_reg32h_i64(get_field(s, r1), o->in2);
2964 /* The value is stored even in case of trap. */
2965 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2966 gen_trap(s);
2967 gen_set_label(lab);
2968 return DISAS_NEXT;
2971 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2973 TCGLabel *lab = gen_new_label();
2974 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2975 /* The value is stored even in case of trap. */
2976 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2977 gen_trap(s);
2978 gen_set_label(lab);
2979 return DISAS_NEXT;
2982 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2984 TCGLabel *lab = gen_new_label();
2985 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2986 /* The value is stored even in case of trap. */
2987 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2988 gen_trap(s);
2989 gen_set_label(lab);
2990 return DISAS_NEXT;
2993 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2995 DisasCompare c;
2997 disas_jcc(s, &c, get_field(s, m3));
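/* The movcond below selects in2 (the newly loaded value) when the
 * condition from disas_jcc holds, and in1 (the register's previous
 * contents) otherwise, so a failed condition degrades to a copy. */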
2999 if (c.is_64) {
3000 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
3001 o->in2, o->in1);
3002 free_compare(&c);
3003 } else {
3004 TCGv_i32 t32 = tcg_temp_new_i32();
3005 TCGv_i64 t, z;
3007 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3008 free_compare(&c);
3010 t = tcg_temp_new_i64();
3011 tcg_gen_extu_i32_i64(t, t32);
3012 tcg_temp_free_i32(t32);
3014 z = tcg_const_i64(0);
3015 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3016 tcg_temp_free_i64(t);
3017 tcg_temp_free_i64(z);
3020 return DISAS_NEXT;
3023 #ifndef CONFIG_USER_ONLY
3024 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3026 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3027 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3028 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3029 tcg_temp_free_i32(r1);
3030 tcg_temp_free_i32(r3);
3031 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3032 return DISAS_PC_STALE_NOCHAIN;
3035 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3037 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3038 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3039 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3040 tcg_temp_free_i32(r1);
3041 tcg_temp_free_i32(r3);
3042 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3043 return DISAS_PC_STALE_NOCHAIN;
3046 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3048 gen_helper_lra(o->out, cpu_env, o->in2);
3049 set_cc_static(s);
3050 return DISAS_NEXT;
3053 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3055 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3056 return DISAS_NEXT;
3059 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3061 TCGv_i64 t1, t2;
3063 per_breaking_event(s);
3065 t1 = tcg_temp_new_i64();
3066 t2 = tcg_temp_new_i64();
3067 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3068 MO_TEUL | MO_ALIGN_8);
3069 tcg_gen_addi_i64(o->in2, o->in2, 4);
3070 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3071 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3072 tcg_gen_shli_i64(t1, t1, 32);
3073 gen_helper_load_psw(cpu_env, t1, t2);
3074 tcg_temp_free_i64(t1);
3075 tcg_temp_free_i64(t2);
3076 return DISAS_NORETURN;
3079 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3081 TCGv_i64 t1, t2;
3083 per_breaking_event(s);
3085 t1 = tcg_temp_new_i64();
3086 t2 = tcg_temp_new_i64();
3087 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3088 MO_TEQ | MO_ALIGN_8);
3089 tcg_gen_addi_i64(o->in2, o->in2, 8);
3090 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3091 gen_helper_load_psw(cpu_env, t1, t2);
3092 tcg_temp_free_i64(t1);
3093 tcg_temp_free_i64(t2);
3094 return DISAS_NORETURN;
3096 #endif
3098 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3100 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3101 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3102 gen_helper_lam(cpu_env, r1, o->in2, r3);
3103 tcg_temp_free_i32(r1);
3104 tcg_temp_free_i32(r3);
3105 return DISAS_NEXT;
3108 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3110 int r1 = get_field(s, r1);
3111 int r3 = get_field(s, r3);
3112 TCGv_i64 t1, t2;
3114 /* Only one register to read. */
3115 t1 = tcg_temp_new_i64();
3116 if (unlikely(r1 == r3)) {
3117 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3118 store_reg32_i64(r1, t1);
3119 tcg_temp_free(t1);
3120 return DISAS_NEXT;
3123 /* First load the values of the first and last registers to trigger
3124 possible page faults. */
3125 t2 = tcg_temp_new_i64();
3126 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3127 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3128 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3129 store_reg32_i64(r1, t1);
3130 store_reg32_i64(r3, t2);
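/* E.g. a wrapping LM %r14,%r1,...: (r3 - r1) & 15 = 3, so the last
 * register comes from offset 12. Reading the first and last words
 * before any store means a page fault arrives while all registers
 * are still unmodified, keeping the instruction restartable. */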
3132 /* Only two registers to read. */
3133 if (((r1 + 1) & 15) == r3) {
3134 tcg_temp_free(t2);
3135 tcg_temp_free(t1);
3136 return DISAS_NEXT;
3139 /* Then load the remaining registers. Page fault can't occur. */
3140 r3 = (r3 - 1) & 15;
3141 tcg_gen_movi_i64(t2, 4);
3142 while (r1 != r3) {
3143 r1 = (r1 + 1) & 15;
3144 tcg_gen_add_i64(o->in2, o->in2, t2);
3145 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3146 store_reg32_i64(r1, t1);
3148 tcg_temp_free(t2);
3149 tcg_temp_free(t1);
3151 return DISAS_NEXT;
3154 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3156 int r1 = get_field(s, r1);
3157 int r3 = get_field(s, r3);
3158 TCGv_i64 t1, t2;
3160 /* Only one register to read. */
3161 t1 = tcg_temp_new_i64();
3162 if (unlikely(r1 == r3)) {
3163 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3164 store_reg32h_i64(r1, t1);
3165 tcg_temp_free(t1);
3166 return DISAS_NEXT;
3169 /* First load the values of the first and last registers to trigger
3170 possible page faults. */
3171 t2 = tcg_temp_new_i64();
3172 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3173 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3174 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3175 store_reg32h_i64(r1, t1);
3176 store_reg32h_i64(r3, t2);
3178 /* Only two registers to read. */
3179 if (((r1 + 1) & 15) == r3) {
3180 tcg_temp_free(t2);
3181 tcg_temp_free(t1);
3182 return DISAS_NEXT;
3185 /* Then load the remaining registers. Page fault can't occur. */
3186 r3 = (r3 - 1) & 15;
3187 tcg_gen_movi_i64(t2, 4);
3188 while (r1 != r3) {
3189 r1 = (r1 + 1) & 15;
3190 tcg_gen_add_i64(o->in2, o->in2, t2);
3191 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3192 store_reg32h_i64(r1, t1);
3194 tcg_temp_free(t2);
3195 tcg_temp_free(t1);
3197 return DISAS_NEXT;
3200 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3202 int r1 = get_field(s, r1);
3203 int r3 = get_field(s, r3);
3204 TCGv_i64 t1, t2;
3206 /* Only one register to read. */
3207 if (unlikely(r1 == r3)) {
3208 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3209 return DISAS_NEXT;
3212 /* First load the values of the first and last registers to trigger
3213 possible page faults. */
3214 t1 = tcg_temp_new_i64();
3215 t2 = tcg_temp_new_i64();
3216 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3217 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3218 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3219 tcg_gen_mov_i64(regs[r1], t1);
3220 tcg_temp_free(t2);
3222 /* Only two registers to read. */
3223 if (((r1 + 1) & 15) == r3) {
3224 tcg_temp_free(t1);
3225 return DISAS_NEXT;
3228 /* Then load the remaining registers. Page fault can't occur. */
3229 r3 = (r3 - 1) & 15;
3230 tcg_gen_movi_i64(t1, 8);
3231 while (r1 != r3) {
3232 r1 = (r1 + 1) & 15;
3233 tcg_gen_add_i64(o->in2, o->in2, t1);
3234 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3236 tcg_temp_free(t1);
3238 return DISAS_NEXT;
3241 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3243 TCGv_i64 a1, a2;
3244 MemOp mop = s->insn->data;
3246 /* In a parallel context, stop the world and single step. */
3247 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3248 update_psw_addr(s);
3249 update_cc_op(s);
3250 gen_exception(EXCP_ATOMIC);
3251 return DISAS_NORETURN;
3254 /* In a serial context, perform the two loads ... */
3255 a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3256 a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3257 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3258 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3259 tcg_temp_free_i64(a1);
3260 tcg_temp_free_i64(a2);
3262 /* ... and indicate that we performed them while interlocked. */
3263 gen_op_movi_cc(s, 0);
3264 return DISAS_NEXT;
3267 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3269 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3270 gen_helper_lpq(o->out, cpu_env, o->in2);
3271 } else if (HAVE_ATOMIC128) {
3272 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3273 } else {
3274 gen_helper_exit_atomic(cpu_env);
3275 return DISAS_NORETURN;
3277 return_low128(o->out2);
3278 return DISAS_NEXT;
3281 #ifndef CONFIG_USER_ONLY
3282 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3284 o->addr1 = get_address(s, 0, get_field(s, r2), 0);
3285 tcg_gen_qemu_ld_tl(o->out, o->addr1, MMU_REAL_IDX, s->insn->data);
3286 return DISAS_NEXT;
3288 #endif
3290 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3292 tcg_gen_andi_i64(o->out, o->in2, -256);
3293 return DISAS_NEXT;
3296 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3298 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3300 if (get_field(s, m3) > 6) {
3301 gen_program_exception(s, PGM_SPECIFICATION);
3302 return DISAS_NORETURN;
3305 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3306 tcg_gen_neg_i64(o->addr1, o->addr1);
3307 tcg_gen_movi_i64(o->out, 16);
3308 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3309 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
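/* The ori/neg pair computes block_size - (addr % block_size).
 * E.g. with m3 = 0 (64-byte blocks) and an address ending in 0x38,
 * 64 - 56 = 8 bytes remain, so OUT = min(16, 8) = 8. */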
3310 return DISAS_NEXT;
3313 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3315 #if !defined(CONFIG_USER_ONLY)
3316 TCGv_i32 i2;
3317 #endif
3318 const uint16_t monitor_class = get_field(s, i2);
3320 if (monitor_class & 0xff00) {
3321 gen_program_exception(s, PGM_SPECIFICATION);
3322 return DISAS_NORETURN;
3325 #if !defined(CONFIG_USER_ONLY)
3326 i2 = tcg_const_i32(monitor_class);
3327 gen_helper_monitor_call(cpu_env, o->addr1, i2);
3328 tcg_temp_free_i32(i2);
3329 #endif
3330 /* Defaults to a NOP. */
3331 return DISAS_NEXT;
3334 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3336 o->out = o->in2;
3337 o->g_out = o->g_in2;
3338 o->in2 = NULL;
3339 o->g_in2 = false;
3340 return DISAS_NEXT;
3343 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3345 int b2 = get_field(s, b2);
3346 TCGv ar1 = tcg_temp_new_i64();
3348 o->out = o->in2;
3349 o->g_out = o->g_in2;
3350 o->in2 = NULL;
3351 o->g_in2 = false;
3353 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3354 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3355 tcg_gen_movi_i64(ar1, 0);
3356 break;
3357 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3358 tcg_gen_movi_i64(ar1, 1);
3359 break;
3360 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3361 if (b2) {
3362 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3363 } else {
3364 tcg_gen_movi_i64(ar1, 0);
3366 break;
3367 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3368 tcg_gen_movi_i64(ar1, 2);
3369 break;
3372 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3373 tcg_temp_free_i64(ar1);
3375 return DISAS_NEXT;
3378 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3380 o->out = o->in1;
3381 o->out2 = o->in2;
3382 o->g_out = o->g_in1;
3383 o->g_out2 = o->g_in2;
3384 o->in1 = NULL;
3385 o->in2 = NULL;
3386 o->g_in1 = o->g_in2 = false;
3387 return DISAS_NEXT;
3390 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3392 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3393 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3394 tcg_temp_free_i32(l);
3395 return DISAS_NEXT;
3398 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3400 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3401 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3402 tcg_temp_free_i32(l);
3403 return DISAS_NEXT;
3406 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3408 int r1 = get_field(s, r1);
3409 int r2 = get_field(s, r2);
3410 TCGv_i32 t1, t2;
3412 /* r1 and r2 must be even. */
3413 if (r1 & 1 || r2 & 1) {
3414 gen_program_exception(s, PGM_SPECIFICATION);
3415 return DISAS_NORETURN;
3418 t1 = tcg_const_i32(r1);
3419 t2 = tcg_const_i32(r2);
3420 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3421 tcg_temp_free_i32(t1);
3422 tcg_temp_free_i32(t2);
3423 set_cc_static(s);
3424 return DISAS_NEXT;
3427 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3429 int r1 = get_field(s, r1);
3430 int r3 = get_field(s, r3);
3431 TCGv_i32 t1, t3;
3433 /* r1 and r3 must be even. */
3434 if (r1 & 1 || r3 & 1) {
3435 gen_program_exception(s, PGM_SPECIFICATION);
3436 return DISAS_NORETURN;
3439 t1 = tcg_const_i32(r1);
3440 t3 = tcg_const_i32(r3);
3441 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3442 tcg_temp_free_i32(t1);
3443 tcg_temp_free_i32(t3);
3444 set_cc_static(s);
3445 return DISAS_NEXT;
3448 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3450 int r1 = get_field(s, r1);
3451 int r3 = get_field(s, r3);
3452 TCGv_i32 t1, t3;
3454 /* r1 and r3 must be even. */
3455 if (r1 & 1 || r3 & 1) {
3456 gen_program_exception(s, PGM_SPECIFICATION);
3457 return DISAS_NORETURN;
3460 t1 = tcg_const_i32(r1);
3461 t3 = tcg_const_i32(r3);
3462 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3463 tcg_temp_free_i32(t1);
3464 tcg_temp_free_i32(t3);
3465 set_cc_static(s);
3466 return DISAS_NEXT;
3469 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3471 int r3 = get_field(s, r3);
3472 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3473 set_cc_static(s);
3474 return DISAS_NEXT;
3477 #ifndef CONFIG_USER_ONLY
3478 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3480 int r1 = get_field(s, l1);
3481 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3482 set_cc_static(s);
3483 return DISAS_NEXT;
3486 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3488 int r1 = get_field(s, l1);
3489 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3490 set_cc_static(s);
3491 return DISAS_NEXT;
3493 #endif
3495 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3497 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3498 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3499 tcg_temp_free_i32(l);
3500 return DISAS_NEXT;
3503 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3505 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3506 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3507 tcg_temp_free_i32(l);
3508 return DISAS_NEXT;
3511 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3513 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3514 set_cc_static(s);
3515 return DISAS_NEXT;
3518 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3520 TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3521 TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3523 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3524 tcg_temp_free_i32(t1);
3525 tcg_temp_free_i32(t2);
3526 set_cc_static(s);
3527 return DISAS_NEXT;
3530 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3532 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3533 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3534 tcg_temp_free_i32(l);
3535 return DISAS_NEXT;
3538 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3540 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3541 return DISAS_NEXT;
3544 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3546 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3547 return DISAS_NEXT;
3550 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3552 tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3553 return DISAS_NEXT;
3556 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3558 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3559 return DISAS_NEXT;
3562 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3564 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3565 return DISAS_NEXT;
3568 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3570 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3571 return DISAS_NEXT;
3574 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3576 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3577 return_low128(o->out2);
3578 return DISAS_NEXT;
3581 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3583 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3584 return_low128(o->out2);
3585 return DISAS_NEXT;
3588 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3590 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3591 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3592 tcg_temp_free_i64(r3);
3593 return DISAS_NEXT;
3596 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3598 TCGv_i64 r3 = load_freg(get_field(s, r3));
3599 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3600 tcg_temp_free_i64(r3);
3601 return DISAS_NEXT;
3604 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3606 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3607 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3608 tcg_temp_free_i64(r3);
3609 return DISAS_NEXT;
3612 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3614 TCGv_i64 r3 = load_freg(get_field(s, r3));
3615 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3616 tcg_temp_free_i64(r3);
3617 return DISAS_NEXT;
3620 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3622 TCGv_i64 z, n;
3623 z = tcg_const_i64(0);
3624 n = tcg_temp_new_i64();
3625 tcg_gen_neg_i64(n, o->in2);
3626 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3627 tcg_temp_free_i64(n);
3628 tcg_temp_free_i64(z);
3629 return DISAS_NEXT;
3632 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3634 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3635 return DISAS_NEXT;
3638 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3640 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3641 return DISAS_NEXT;
3644 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3646 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3647 tcg_gen_mov_i64(o->out2, o->in2);
3648 return DISAS_NEXT;
3651 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3653 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3654 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3655 tcg_temp_free_i32(l);
3656 set_cc_static(s);
3657 return DISAS_NEXT;
3660 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3662 tcg_gen_neg_i64(o->out, o->in2);
3663 return DISAS_NEXT;
3666 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3668 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3669 return DISAS_NEXT;
3672 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3674 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3675 return DISAS_NEXT;
3678 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3680 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3681 tcg_gen_mov_i64(o->out2, o->in2);
3682 return DISAS_NEXT;
3685 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3687 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3688 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3689 tcg_temp_free_i32(l);
3690 set_cc_static(s);
3691 return DISAS_NEXT;
3694 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3696 tcg_gen_or_i64(o->out, o->in1, o->in2);
3697 return DISAS_NEXT;
3700 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3702 int shift = s->insn->data & 0xff;
3703 int size = s->insn->data >> 8;
3704 uint64_t mask = ((1ull << size) - 1) << shift;
3706 assert(!o->g_in2);
3707 tcg_gen_shli_i64(o->in2, o->in2, shift);
3708 tcg_gen_or_i64(o->out, o->in1, o->in2);
3710 /* Produce the CC from only the bits manipulated. */
3711 tcg_gen_andi_i64(cc_dst, o->out, mask);
3712 set_cc_nz_u64(s, cc_dst);
3713 return DISAS_NEXT;
3716 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3718 o->in1 = tcg_temp_new_i64();
3720 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3721 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3722 } else {
3723 /* Perform the atomic operation in memory. */
3724 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3725 s->insn->data);
3728 /* Recompute also for atomic case: needed for setting CC. */
3729 tcg_gen_or_i64(o->out, o->in1, o->in2);
3731 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3732 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3734 return DISAS_NEXT;
3737 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3739 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3740 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3741 tcg_temp_free_i32(l);
3742 return DISAS_NEXT;
3745 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3747 int l2 = get_field(s, l2) + 1;
3748 TCGv_i32 l;
3750 /* The length must not exceed 32 bytes. */
3751 if (l2 > 32) {
3752 gen_program_exception(s, PGM_SPECIFICATION);
3753 return DISAS_NORETURN;
3755 l = tcg_const_i32(l2);
3756 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3757 tcg_temp_free_i32(l);
3758 return DISAS_NEXT;
3761 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3763 int l2 = get_field(s, l2) + 1;
3764 TCGv_i32 l;
3766 /* The length must be even and must not exceed 64 bytes. */
3767 if ((l2 & 1) || (l2 > 64)) {
3768 gen_program_exception(s, PGM_SPECIFICATION);
3769 return DISAS_NORETURN;
3771 l = tcg_const_i32(l2);
3772 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3773 tcg_temp_free_i32(l);
3774 return DISAS_NEXT;
3777 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3779 gen_helper_popcnt(o->out, o->in2);
3780 return DISAS_NEXT;
3783 #ifndef CONFIG_USER_ONLY
3784 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3786 gen_helper_ptlb(cpu_env);
3787 return DISAS_NEXT;
3789 #endif
3791 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3793 int i3 = get_field(s, i3);
3794 int i4 = get_field(s, i4);
3795 int i5 = get_field(s, i5);
3796 int do_zero = i4 & 0x80;
3797 uint64_t mask, imask, pmask;
3798 int pos, len, rot;
3800 /* Adjust the arguments for the specific insn. */
3801 switch (s->fields.op2) {
3802 case 0x55: /* risbg */
3803 case 0x59: /* risbgn */
3804 i3 &= 63;
3805 i4 &= 63;
3806 pmask = ~0;
3807 break;
3808 case 0x5d: /* risbhg */
3809 i3 &= 31;
3810 i4 &= 31;
3811 pmask = 0xffffffff00000000ull;
3812 break;
3813 case 0x51: /* risblg */
3814 i3 &= 31;
3815 i4 &= 31;
3816 pmask = 0x00000000ffffffffull;
3817 break;
3818 default:
3819 g_assert_not_reached();
3822 /* MASK is the set of bits to be inserted from R2.
3823 Take care for I3/I4 wraparound. */
3824 mask = pmask >> i3;
3825 if (i3 <= i4) {
3826 mask ^= pmask >> i4 >> 1;
3827 } else {
3828 mask |= ~(pmask >> i4 >> 1);
3830 mask &= pmask;
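/* E.g. in the PoO's MSB-first numbering, i3 = 48 and i4 = 55
 * select one byte: mask = (~0 >> 48) ^ (~0 >> 56) = 0xff00; the
 * wrapped range i3 = 60, i4 = 3 yields 0xf00000000000000f. */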
3832 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3833 insns, we need to keep the other half of the register. */
3834 imask = ~mask | ~pmask;
3835 if (do_zero) {
3836 imask = ~pmask;
3839 len = i4 - i3 + 1;
3840 pos = 63 - i4;
3841 rot = i5 & 63;
3842 if (s->fields.op2 == 0x5d) {
3843 pos += 32;
3846 /* In some cases we can implement this with extract. */
3847 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3848 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3849 return DISAS_NEXT;
3852 /* In some cases we can implement this with deposit. */
3853 if (len > 0 && (imask == 0 || ~mask == imask)) {
3854 /* Note that we rotate the bits to be inserted to the lsb, not to
3855 the position as described in the PoO. */
3856 rot = (rot - pos) & 63;
3857 } else {
3858 pos = -1;
3861 /* Rotate the input as necessary. */
3862 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3864 /* Insert the selected bits into the output. */
3865 if (pos >= 0) {
3866 if (imask == 0) {
3867 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3868 } else {
3869 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3871 } else if (imask == 0) {
3872 tcg_gen_andi_i64(o->out, o->in2, mask);
3873 } else {
3874 tcg_gen_andi_i64(o->in2, o->in2, mask);
3875 tcg_gen_andi_i64(o->out, o->out, imask);
3876 tcg_gen_or_i64(o->out, o->out, o->in2);
3878 return DISAS_NEXT;
3881 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3883 int i3 = get_field(s, i3);
3884 int i4 = get_field(s, i4);
3885 int i5 = get_field(s, i5);
3886 uint64_t mask;
3888 /* If this is a test-only form, arrange to discard the result. */
3889 if (i3 & 0x80) {
3890 o->out = tcg_temp_new_i64();
3891 o->g_out = false;
3894 i3 &= 63;
3895 i4 &= 63;
3896 i5 &= 63;
3898 /* MASK is the set of bits to be operated on from R2.
3899 Take care for I3/I4 wraparound. */
3900 mask = ~0ull >> i3;
3901 if (i3 <= i4) {
3902 mask ^= ~0ull >> i4 >> 1;
3903 } else {
3904 mask |= ~(~0ull >> i4 >> 1);
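/* Same selection math as op_risbg above: e.g. i3 = 48, i4 = 55
 * gives mask = 0xff00, and a wrapped range sets both ends. */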
3907 /* Rotate the input as necessary. */
3908 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3910 /* Operate. */
3911 switch (s->fields.op2) {
3912 case 0x54: /* AND */
3913 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3914 tcg_gen_and_i64(o->out, o->out, o->in2);
3915 break;
3916 case 0x56: /* OR */
3917 tcg_gen_andi_i64(o->in2, o->in2, mask);
3918 tcg_gen_or_i64(o->out, o->out, o->in2);
3919 break;
3920 case 0x57: /* XOR */
3921 tcg_gen_andi_i64(o->in2, o->in2, mask);
3922 tcg_gen_xor_i64(o->out, o->out, o->in2);
3923 break;
3924 default:
3925 abort();
3928 /* Set the CC. */
3929 tcg_gen_andi_i64(cc_dst, o->out, mask);
3930 set_cc_nz_u64(s, cc_dst);
3931 return DISAS_NEXT;
3934 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3936 tcg_gen_bswap16_i64(o->out, o->in2);
3937 return DISAS_NEXT;
3940 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3942 tcg_gen_bswap32_i64(o->out, o->in2);
3943 return DISAS_NEXT;
3946 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3948 tcg_gen_bswap64_i64(o->out, o->in2);
3949 return DISAS_NEXT;
3952 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3954 TCGv_i32 t1 = tcg_temp_new_i32();
3955 TCGv_i32 t2 = tcg_temp_new_i32();
3956 TCGv_i32 to = tcg_temp_new_i32();
3957 tcg_gen_extrl_i64_i32(t1, o->in1);
3958 tcg_gen_extrl_i64_i32(t2, o->in2);
3959 tcg_gen_rotl_i32(to, t1, t2);
3960 tcg_gen_extu_i32_i64(o->out, to);
3961 tcg_temp_free_i32(t1);
3962 tcg_temp_free_i32(t2);
3963 tcg_temp_free_i32(to);
3964 return DISAS_NEXT;
3967 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3969 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3970 return DISAS_NEXT;
3973 #ifndef CONFIG_USER_ONLY
3974 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3976 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3977 set_cc_static(s);
3978 return DISAS_NEXT;
3981 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3983 gen_helper_sacf(cpu_env, o->in2);
3984 /* Addressing mode has changed, so end the block. */
3985 return DISAS_PC_STALE;
3987 #endif
3989 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3991 int sam = s->insn->data;
3992 TCGv_i64 tsam;
3993 uint64_t mask;
3995 switch (sam) {
3996 case 0:
3997 mask = 0xffffff;
3998 break;
3999 case 1:
4000 mask = 0x7fffffff;
4001 break;
4002 default:
4003 mask = -1;
4004 break;
4007 /* Bizarre but true: we check the address of the current insn for the
4008 specification exception, not the next to be executed. Thus the PoO
4009 documents that Bad Things Happen two bytes before the end. */
4010 if (s->base.pc_next & ~mask) {
4011 gen_program_exception(s, PGM_SPECIFICATION);
4012 return DISAS_NORETURN;
4014 s->pc_tmp &= mask;
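/* E.g. a SAM24 sitting at address 0x1000000 already violates the
 * 24-bit mask (pc & ~0xffffff != 0) and traps here, even though
 * only the *next* instruction would execute in 24-bit mode. */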
4016 tsam = tcg_const_i64(sam);
4017 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
4018 tcg_temp_free_i64(tsam);
4020 /* Always exit the TB, since we (may have) changed execution mode. */
4021 return DISAS_PC_STALE;
4024 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
4026 int r1 = get_field(s, r1);
4027 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
4028 return DISAS_NEXT;
4031 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4033 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4034 return DISAS_NEXT;
4037 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4039 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4040 return DISAS_NEXT;
4043 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4045 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4046 return_low128(o->out2);
4047 return DISAS_NEXT;
4050 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4052 gen_helper_sqeb(o->out, cpu_env, o->in2);
4053 return DISAS_NEXT;
4056 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4058 gen_helper_sqdb(o->out, cpu_env, o->in2);
4059 return DISAS_NEXT;
4062 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4064 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4065 return_low128(o->out2);
4066 return DISAS_NEXT;
4069 #ifndef CONFIG_USER_ONLY
4070 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4072 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4073 set_cc_static(s);
4074 return DISAS_NEXT;
4077 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4079 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4080 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4081 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4082 set_cc_static(s);
4083 tcg_temp_free_i32(r1);
4084 tcg_temp_free_i32(r3);
4085 return DISAS_NEXT;
4087 #endif
4089 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4091 DisasCompare c;
4092 TCGv_i64 a, h;
4093 TCGLabel *lab;
4094 int r1;
4096 disas_jcc(s, &c, get_field(s, m3));
4098 /* We want to store when the condition is fulfilled, so branch
4099 out when it's not. */
4100 c.cond = tcg_invert_cond(c.cond);
4102 lab = gen_new_label();
4103 if (c.is_64) {
4104 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4105 } else {
4106 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4108 free_compare(&c);
4110 r1 = get_field(s, r1);
4111 a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4112 switch (s->insn->data) {
4113 case 1: /* STOCG */
4114 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4115 break;
4116 case 0: /* STOC */
4117 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4118 break;
4119 case 2: /* STOCFH */
4120 h = tcg_temp_new_i64();
4121 tcg_gen_shri_i64(h, regs[r1], 32);
4122 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4123 tcg_temp_free_i64(h);
4124 break;
4125 default:
4126 g_assert_not_reached();
4128 tcg_temp_free_i64(a);
4130 gen_set_label(lab);
4131 return DISAS_NEXT;
4134 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4136 uint64_t sign = 1ull << s->insn->data;
4137 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4138 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4139 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4140 /* The arithmetic left shift is curious in that it does not affect
4141 the sign bit. Copy that over from the source unchanged. */
4142 tcg_gen_andi_i64(o->out, o->out, ~sign);
4143 tcg_gen_andi_i64(o->in1, o->in1, sign);
4144 tcg_gen_or_i64(o->out, o->out, o->in1);
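/* E.g. for SLAG, 0x8000000000000001 << 1 yields
 * 0x8000000000000002: bit 63 of the result comes from the source's
 * sign bit rather than from the shift itself. */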
4145 return DISAS_NEXT;
4148 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4150 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4151 return DISAS_NEXT;
4154 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4156 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4157 return DISAS_NEXT;
4160 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4162 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4163 return DISAS_NEXT;
4166 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4168 gen_helper_sfpc(cpu_env, o->in2);
4169 return DISAS_NEXT;
4172 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4174 gen_helper_sfas(cpu_env, o->in2);
4175 return DISAS_NEXT;
4178 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4180 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4181 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4182 gen_helper_srnm(cpu_env, o->addr1);
4183 return DISAS_NEXT;
4186 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4188 /* Bits 0-55 are ignored. */
4189 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4190 gen_helper_srnm(cpu_env, o->addr1);
4191 return DISAS_NEXT;
4194 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4196 TCGv_i64 tmp = tcg_temp_new_i64();
4198 /* Bits other than 61-63 are ignored. */
4199 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4201 /* No need to call a helper: we don't implement DFP. */
4202 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4203 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4204 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4206 tcg_temp_free_i64(tmp);
4207 return DISAS_NEXT;
4210 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4212 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4213 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4214 set_cc_static(s);
4216 tcg_gen_shri_i64(o->in1, o->in1, 24);
4217 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4218 return DISAS_NEXT;
4221 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4223 int b1 = get_field(s, b1);
4224 int d1 = get_field(s, d1);
4225 int b2 = get_field(s, b2);
4226 int d2 = get_field(s, d2);
4227 int r3 = get_field(s, r3);
4228 TCGv_i64 tmp = tcg_temp_new_i64();
4230 /* fetch all operands first */
4231 o->in1 = tcg_temp_new_i64();
4232 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4233 o->in2 = tcg_temp_new_i64();
4234 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4235 o->addr1 = get_address(s, 0, r3, 0);
4237 /* load the third operand into r3 before modifying anything */
4238 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4240 /* subtract CPU timer from first operand and store in GR0 */
4241 gen_helper_stpt(tmp, cpu_env);
4242 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4244 /* store second operand in GR1 */
4245 tcg_gen_mov_i64(regs[1], o->in2);
4247 tcg_temp_free_i64(tmp);
4248 return DISAS_NEXT;
4251 #ifndef CONFIG_USER_ONLY
4252 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4254 tcg_gen_shri_i64(o->in2, o->in2, 4);
4255 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4256 return DISAS_NEXT;
4259 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4261 gen_helper_sske(cpu_env, o->in1, o->in2);
4262 return DISAS_NEXT;
4265 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4267 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4268 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4269 return DISAS_PC_STALE_NOCHAIN;
4272 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4274 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4275 return DISAS_NEXT;
4277 #endif
4279 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4281 gen_helper_stck(o->out, cpu_env);
4282 /* ??? We don't implement clock states. */
4283 gen_op_movi_cc(s, 0);
4284 return DISAS_NEXT;
4287 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4289 TCGv_i64 c1 = tcg_temp_new_i64();
4290 TCGv_i64 c2 = tcg_temp_new_i64();
4291 TCGv_i64 todpr = tcg_temp_new_i64();
4292 gen_helper_stck(c1, cpu_env);
4293 /* 16-bit value stored in a uint32_t (only valid bits set) */
4294 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4295 /* Shift the 64-bit value into its place as a zero-extended
4296 104-bit value. Note that "bit positions 64-103 are always
4297 non-zero so that they compare differently to STCK"; we set
4298 the least significant bit to 1. */
4299 tcg_gen_shli_i64(c2, c1, 56);
4300 tcg_gen_shri_i64(c1, c1, 8);
4301 tcg_gen_ori_i64(c2, c2, 0x10000);
4302 tcg_gen_or_i64(c2, c2, todpr);
4303 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4304 tcg_gen_addi_i64(o->in2, o->in2, 8);
4305 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
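/* Resulting 16-byte layout: byte 0 is the (zero) epoch index,
 * bytes 1-8 the 64-bit clock, bytes 9-12 zero, byte 13 = 0x01
 * (the forced nonzero bit), bytes 14-15 the TOD programmable
 * field from todpr. */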
4306 tcg_temp_free_i64(c1);
4307 tcg_temp_free_i64(c2);
4308 tcg_temp_free_i64(todpr);
4309 /* ??? We don't implement clock states. */
4310 gen_op_movi_cc(s, 0);
4311 return DISAS_NEXT;
4314 #ifndef CONFIG_USER_ONLY
4315 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4317 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4318 gen_helper_sck(cc_op, cpu_env, o->in1);
4319 set_cc_static(s);
4320 return DISAS_NEXT;
4323 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4325 gen_helper_sckc(cpu_env, o->in2);
4326 return DISAS_NEXT;
4329 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4331 gen_helper_sckpf(cpu_env, regs[0]);
4332 return DISAS_NEXT;
4335 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4337 gen_helper_stckc(o->out, cpu_env);
4338 return DISAS_NEXT;
4341 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4343 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4344 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4345 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4346 tcg_temp_free_i32(r1);
4347 tcg_temp_free_i32(r3);
4348 return DISAS_NEXT;
4351 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4353 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4354 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4355 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4356 tcg_temp_free_i32(r1);
4357 tcg_temp_free_i32(r3);
4358 return DISAS_NEXT;
4361 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4363 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4364 return DISAS_NEXT;
4367 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4369 gen_helper_spt(cpu_env, o->in2);
4370 return DISAS_NEXT;
4373 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4375 gen_helper_stfl(cpu_env);
4376 return DISAS_NEXT;
4379 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4381 gen_helper_stpt(o->out, cpu_env);
4382 return DISAS_NEXT;
4385 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4387 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4388 set_cc_static(s);
4389 return DISAS_NEXT;
4392 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4394 gen_helper_spx(cpu_env, o->in2);
4395 return DISAS_NEXT;
4398 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4400 gen_helper_xsch(cpu_env, regs[1]);
4401 set_cc_static(s);
4402 return DISAS_NEXT;
4405 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4407 gen_helper_csch(cpu_env, regs[1]);
4408 set_cc_static(s);
4409 return DISAS_NEXT;
4412 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4414 gen_helper_hsch(cpu_env, regs[1]);
4415 set_cc_static(s);
4416 return DISAS_NEXT;
4419 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4421 gen_helper_msch(cpu_env, regs[1], o->in2);
4422 set_cc_static(s);
4423 return DISAS_NEXT;
4426 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4428 gen_helper_rchp(cpu_env, regs[1]);
4429 set_cc_static(s);
4430 return DISAS_NEXT;
4433 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4435 gen_helper_rsch(cpu_env, regs[1]);
4436 set_cc_static(s);
4437 return DISAS_NEXT;
4440 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4442 gen_helper_sal(cpu_env, regs[1]);
4443 return DISAS_NEXT;
4446 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4448 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4449 return DISAS_NEXT;
4452 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4454 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4455 gen_op_movi_cc(s, 3);
4456 return DISAS_NEXT;
4459 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4461 /* The instruction is suppressed if not provided. */
4462 return DISAS_NEXT;
4465 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4467 gen_helper_ssch(cpu_env, regs[1], o->in2);
4468 set_cc_static(s);
4469 return DISAS_NEXT;
4472 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4474 gen_helper_stsch(cpu_env, regs[1], o->in2);
4475 set_cc_static(s);
4476 return DISAS_NEXT;
4479 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4481 gen_helper_stcrw(cpu_env, o->in2);
4482 set_cc_static(s);
4483 return DISAS_NEXT;
4486 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4488 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4489 set_cc_static(s);
4490 return DISAS_NEXT;
4493 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4495 gen_helper_tsch(cpu_env, regs[1], o->in2);
4496 set_cc_static(s);
4497 return DISAS_NEXT;
4500 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4502 gen_helper_chsc(cpu_env, o->in2);
4503 set_cc_static(s);
4504 return DISAS_NEXT;
4507 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4509 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4510 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4511 return DISAS_NEXT;
4514 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4516 uint64_t i2 = get_field(s, i2);
4517 TCGv_i64 t;
4519 /* It is important to do what the instruction name says: STORE THEN.
4520        If we let the output hook perform the store, then if we fault and
4521 restart, we'll have the wrong SYSTEM MASK in place. */
4522 t = tcg_temp_new_i64();
4523 tcg_gen_shri_i64(t, psw_mask, 56);
4524 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4525 tcg_temp_free_i64(t);
4527 if (s->fields.op == 0xac) {
4528 tcg_gen_andi_i64(psw_mask, psw_mask,
4529 (i2 << 56) | 0x00ffffffffffffffull);
4530 } else {
4531 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4534 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4535 return DISAS_PC_STALE_NOCHAIN;
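/*
 * Both STNSM (op 0xac) and STOSM reach this point: after the old system
 * mask has been stored, STNSM ANDs the immediate into PSW bits 0-7 and
 * STOSM ORs it in, which is why i2 is shifted up by 56 before being
 * applied to the 64-bit psw_mask.
 */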
4538 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4540 o->addr1 = get_address(s, 0, get_field(s, r2), 0);
4541 tcg_gen_qemu_st_tl(o->in1, o->addr1, MMU_REAL_IDX, s->insn->data);
4543 if (s->base.tb->flags & FLAG_MASK_PER) {
4544 update_psw_addr(s);
4545 gen_helper_per_store_real(cpu_env);
4547 return DISAS_NEXT;
4549 #endif
4551 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4553 gen_helper_stfle(cc_op, cpu_env, o->in2);
4554 set_cc_static(s);
4555 return DISAS_NEXT;
4558 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4560 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4561 return DISAS_NEXT;
4564 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4566 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4567 return DISAS_NEXT;
4570 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4572 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4573 return DISAS_NEXT;
4576 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4578 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4579 return DISAS_NEXT;
4582 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4584 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4585 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4586 gen_helper_stam(cpu_env, r1, o->in2, r3);
4587 tcg_temp_free_i32(r1);
4588 tcg_temp_free_i32(r3);
4589 return DISAS_NEXT;
4592 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4594 int m3 = get_field(s, m3);
4595 int pos, base = s->insn->data;
4596 TCGv_i64 tmp = tcg_temp_new_i64();
4598 pos = base + ctz32(m3) * 8;
4599 switch (m3) {
4600 case 0xf:
4601 /* Effectively a 32-bit store. */
4602 tcg_gen_shri_i64(tmp, o->in1, pos);
4603 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4604 break;
4606 case 0xc:
4607 case 0x6:
4608 case 0x3:
4609 /* Effectively a 16-bit store. */
4610 tcg_gen_shri_i64(tmp, o->in1, pos);
4611 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4612 break;
4614 case 0x8:
4615 case 0x4:
4616 case 0x2:
4617 case 0x1:
4618 /* Effectively an 8-bit store. */
4619 tcg_gen_shri_i64(tmp, o->in1, pos);
4620 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4621 break;
4623 default:
4624 /* This is going to be a sequence of shifts and stores. */
4625 pos = base + 32 - 8;
4626 while (m3) {
4627 if (m3 & 0x8) {
4628 tcg_gen_shri_i64(tmp, o->in1, pos);
4629 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4630 tcg_gen_addi_i64(o->in2, o->in2, 1);
4632 m3 = (m3 << 1) & 0xf;
4633 pos -= 8;
4635 break;
4637 tcg_temp_free_i64(tmp);
4638 return DISAS_NEXT;
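/*
 * Worked example for the general case above (values assumed purely for
 * illustration): with base = 0 and m3 = 0b0101 the loop visits
 *
 *   pos = 24, mask bit clear -> skip
 *   pos = 16, mask bit set   -> store (in1 >> 16) & 0xff, addr += 1
 *   pos =  8, mask bit clear -> skip
 *   pos =  0, mask bit set   -> store in1 & 0xff, addr += 1
 *
 * so the selected register bytes land in consecutive memory bytes.
 */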
4641 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4643 int r1 = get_field(s, r1);
4644 int r3 = get_field(s, r3);
4645 int size = s->insn->data;
4646 TCGv_i64 tsize = tcg_const_i64(size);
4648 while (1) {
4649 if (size == 8) {
4650 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4651 } else {
4652 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4654 if (r1 == r3) {
4655 break;
4657 tcg_gen_add_i64(o->in2, o->in2, tsize);
4658 r1 = (r1 + 1) & 15;
4661 tcg_temp_free_i64(tsize);
4662 return DISAS_NEXT;
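/*
 * Note the modulo-16 wraparound: as with LOAD MULTIPLE, r1 may be
 * numerically greater than r3. A hypothetical STMG with r1 = 14 and
 * r3 = 1 stores r14, r15, r0, r1 to consecutive doublewords before the
 * r1 == r3 test breaks out of the loop.
 */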
4665 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4667 int r1 = get_field(s, r1);
4668 int r3 = get_field(s, r3);
4669 TCGv_i64 t = tcg_temp_new_i64();
4670 TCGv_i64 t4 = tcg_const_i64(4);
4671 TCGv_i64 t32 = tcg_const_i64(32);
4673 while (1) {
4674 tcg_gen_shl_i64(t, regs[r1], t32);
4675 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4676 if (r1 == r3) {
4677 break;
4679 tcg_gen_add_i64(o->in2, o->in2, t4);
4680 r1 = (r1 + 1) & 15;
4683 tcg_temp_free_i64(t);
4684 tcg_temp_free_i64(t4);
4685 tcg_temp_free_i64(t32);
4686 return DISAS_NEXT;
4689 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4691 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4692 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4693 } else if (HAVE_ATOMIC128) {
4694 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4695 } else {
4696 gen_helper_exit_atomic(cpu_env);
4697 return DISAS_NORETURN;
4699 return DISAS_NEXT;
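/*
 * The three-way split above is the usual pattern for 16-byte accesses:
 * a non-parallel context may simply call the helper, a parallel TB may
 * do so only when the host provides a genuine 128-bit atomic operation
 * (HAVE_ATOMIC128), and otherwise exit_atomic restarts the instruction
 * in an exclusive context.
 */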
4702 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4704 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4705 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4707 gen_helper_srst(cpu_env, r1, r2);
4709 tcg_temp_free_i32(r1);
4710 tcg_temp_free_i32(r2);
4711 set_cc_static(s);
4712 return DISAS_NEXT;
4715 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4717 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4718 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4720 gen_helper_srstu(cpu_env, r1, r2);
4722 tcg_temp_free_i32(r1);
4723 tcg_temp_free_i32(r2);
4724 set_cc_static(s);
4725 return DISAS_NEXT;
4728 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4730 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4731 return DISAS_NEXT;
4734 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4736 DisasCompare cmp;
4737 TCGv_i64 borrow;
4739 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4741 /* The !borrow flag is the msb of CC. Since we want the inverse of
4742 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4743 disas_jcc(s, &cmp, 8 | 4);
4744 borrow = tcg_temp_new_i64();
4745 if (cmp.is_64) {
4746 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4747 } else {
4748 TCGv_i32 t = tcg_temp_new_i32();
4749 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4750 tcg_gen_extu_i32_i64(borrow, t);
4751 tcg_temp_free_i32(t);
4753 free_compare(&cmp);
4755 tcg_gen_sub_i64(o->out, o->out, borrow);
4756 tcg_temp_free_i64(borrow);
4757 return DISAS_NEXT;
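/*
 * Spelling out the borrow extraction: for the logical-subtract cc ops,
 * cc 0 and 1 mean "borrow" and cc 2 and 3 mean "no borrow", hence the
 * note above that !borrow is the msb of CC. The jcc mask 8 | 4 is thus
 * true exactly when a borrow occurred, and setcond materializes that
 * truth value as the 0/1 subtracted from the difference.
 */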
4760 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4762 TCGv_i32 t;
4764 update_psw_addr(s);
4765 update_cc_op(s);
4767 t = tcg_const_i32(get_field(s, i1) & 0xff);
4768 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4769 tcg_temp_free_i32(t);
4771 t = tcg_const_i32(s->ilen);
4772 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4773 tcg_temp_free_i32(t);
4775 gen_exception(EXCP_SVC);
4776 return DISAS_NORETURN;
4779 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4781 int cc = 0;
4783 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4784 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4785 gen_op_movi_cc(s, cc);
4786 return DISAS_NEXT;
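/*
 * The cc produced above encodes the addressing mode directly:
 * cc 0 = 24-bit, cc 1 = 31-bit, cc 3 = 64-bit. cc 2 would require the
 * extended-addressing bit without the basic-addressing bit, a PSW
 * combination that cannot occur.
 */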
4789 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4791 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4792 set_cc_static(s);
4793 return DISAS_NEXT;
4796 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4798 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4799 set_cc_static(s);
4800 return DISAS_NEXT;
4803 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4805 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4806 set_cc_static(s);
4807 return DISAS_NEXT;
4810 #ifndef CONFIG_USER_ONLY
4812 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4814 gen_helper_testblock(cc_op, cpu_env, o->in2);
4815 set_cc_static(s);
4816 return DISAS_NEXT;
4819 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4821 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4822 set_cc_static(s);
4823 return DISAS_NEXT;
4826 #endif
4828 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4830 TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
4831 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4832 tcg_temp_free_i32(l1);
4833 set_cc_static(s);
4834 return DISAS_NEXT;
4837 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4839 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4840 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4841 tcg_temp_free_i32(l);
4842 set_cc_static(s);
4843 return DISAS_NEXT;
4846 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4848 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4849 return_low128(o->out2);
4850 set_cc_static(s);
4851 return DISAS_NEXT;
4854 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4856 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4857 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4858 tcg_temp_free_i32(l);
4859 set_cc_static(s);
4860 return DISAS_NEXT;
4863 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4865 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4866 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4867 tcg_temp_free_i32(l);
4868 set_cc_static(s);
4869 return DISAS_NEXT;
4872 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4874 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4875 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4876 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4877 TCGv_i32 tst = tcg_temp_new_i32();
4878 int m3 = get_field(s, m3);
4880 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4881 m3 = 0;
4883 if (m3 & 1) {
4884 tcg_gen_movi_i32(tst, -1);
4885 } else {
4886 tcg_gen_extrl_i64_i32(tst, regs[0]);
4887 if (s->insn->opc & 3) {
4888 tcg_gen_ext8u_i32(tst, tst);
4889 } else {
4890 tcg_gen_ext16u_i32(tst, tst);
4893 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4895 tcg_temp_free_i32(r1);
4896 tcg_temp_free_i32(r2);
4897 tcg_temp_free_i32(sizes);
4898 tcg_temp_free_i32(tst);
4899 set_cc_static(s);
4900 return DISAS_NEXT;
4903 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4905 TCGv_i32 t1 = tcg_const_i32(0xff);
4906 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4907 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4908 tcg_temp_free_i32(t1);
4909 set_cc_static(s);
4910 return DISAS_NEXT;
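/*
 * TEST AND SET in one atomic exchange: the byte is replaced with 0xff
 * and cc becomes the old leftmost bit, so e.g. an old value of 0x00
 * yields cc 0 (was clear) while 0x80-0xff yields cc 1 (was set).
 */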
4913 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4915 TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4916 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4917 tcg_temp_free_i32(l);
4918 return DISAS_NEXT;
4921 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4923 int l1 = get_field(s, l1) + 1;
4924 TCGv_i32 l;
4926 /* The length must not exceed 32 bytes. */
4927 if (l1 > 32) {
4928 gen_program_exception(s, PGM_SPECIFICATION);
4929 return DISAS_NORETURN;
4931 l = tcg_const_i32(l1);
4932 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4933 tcg_temp_free_i32(l);
4934 set_cc_static(s);
4935 return DISAS_NEXT;
4938 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4940 int l1 = get_field(s, l1) + 1;
4941 TCGv_i32 l;
4943     /* The length must be even and must not exceed 64 bytes. */
4944 if ((l1 & 1) || (l1 > 64)) {
4945 gen_program_exception(s, PGM_SPECIFICATION);
4946 return DISAS_NORETURN;
4948 l = tcg_const_i32(l1);
4949 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4950 tcg_temp_free_i32(l);
4951 set_cc_static(s);
4952 return DISAS_NEXT;
4956 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4958 int d1 = get_field(s, d1);
4959 int d2 = get_field(s, d2);
4960 int b1 = get_field(s, b1);
4961 int b2 = get_field(s, b2);
4962 int l = get_field(s, l1);
4963 TCGv_i32 t32;
4965 o->addr1 = get_address(s, 0, b1, d1);
4967 /* If the addresses are identical, this is a store/memset of zero. */
4968 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4969 o->in2 = tcg_const_i64(0);
4971 l++;
4972 while (l >= 8) {
4973 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4974 l -= 8;
4975 if (l > 0) {
4976 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4979 if (l >= 4) {
4980 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4981 l -= 4;
4982 if (l > 0) {
4983 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4986 if (l >= 2) {
4987 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4988 l -= 2;
4989 if (l > 0) {
4990 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4993 if (l) {
4994 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4996 gen_op_movi_cc(s, 0);
4997 return DISAS_NEXT;
5000 /* But in general we'll defer to a helper. */
5001 o->in2 = get_address(s, 0, b2, d2);
5002 t32 = tcg_const_i32(l);
5003 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
5004 tcg_temp_free_i32(t32);
5005 set_cc_static(s);
5006 return DISAS_NEXT;
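/*
 * For the zeroing fast path above, the length decomposes greedily into
 * 8/4/2/1-byte stores. An encoded length field of 10 (11 bytes, an
 * assumed example) becomes one 8-byte, one 2-byte and one 1-byte zero
 * store, with the address advanced between stores but not after the
 * final one.
 */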
5009 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
5011 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5012 return DISAS_NEXT;
5015 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
5017 int shift = s->insn->data & 0xff;
5018 int size = s->insn->data >> 8;
5019 uint64_t mask = ((1ull << size) - 1) << shift;
5021 assert(!o->g_in2);
5022 tcg_gen_shli_i64(o->in2, o->in2, shift);
5023 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5025 /* Produce the CC from only the bits manipulated. */
5026 tcg_gen_andi_i64(cc_dst, o->out, mask);
5027 set_cc_nz_u64(s, cc_dst);
5028 return DISAS_NEXT;
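/*
 * The insn->data value packs "size << 8 | shift" to select the slice an
 * immediate-logical insn touches. As an assumed example, an insn that
 * operates on the high 32 bits of the register would use shift = 32 and
 * size = 32, giving mask = 0xffffffff00000000, so the cc is derived
 * only from the bits that were actually modified.
 */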
5031 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5033 o->in1 = tcg_temp_new_i64();
5035 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5036 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5037 } else {
5038 /* Perform the atomic operation in memory. */
5039 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5040 s->insn->data);
5043 /* Recompute also for atomic case: needed for setting CC. */
5044 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5046 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5047 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5049 return DISAS_NEXT;
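/*
 * Sketch of the two paths above: without the interlocked-access
 * facility the read-xor-write is emitted as separate load, xor and
 * store ops, while with it a single atomic fetch-xor is used. Either
 * way o->out is computed by a plain xor so that the cc generators see
 * the stored result.
 */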
5052 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5054 o->out = tcg_const_i64(0);
5055 return DISAS_NEXT;
5058 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5060 o->out = tcg_const_i64(0);
5061 o->out2 = o->out;
5062 o->g_out2 = true;
5063 return DISAS_NEXT;
5066 #ifndef CONFIG_USER_ONLY
5067 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5069 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5071 gen_helper_clp(cpu_env, r2);
5072 tcg_temp_free_i32(r2);
5073 set_cc_static(s);
5074 return DISAS_NEXT;
5077 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5079 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5080 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5082 gen_helper_pcilg(cpu_env, r1, r2);
5083 tcg_temp_free_i32(r1);
5084 tcg_temp_free_i32(r2);
5085 set_cc_static(s);
5086 return DISAS_NEXT;
5089 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5091 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5092 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5094 gen_helper_pcistg(cpu_env, r1, r2);
5095 tcg_temp_free_i32(r1);
5096 tcg_temp_free_i32(r2);
5097 set_cc_static(s);
5098 return DISAS_NEXT;
5101 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5103 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5104 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5106 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5107 tcg_temp_free_i32(ar);
5108 tcg_temp_free_i32(r1);
5109 set_cc_static(s);
5110 return DISAS_NEXT;
5113 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5115 gen_helper_sic(cpu_env, o->in1, o->in2);
5116 return DISAS_NEXT;
5119 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5121 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5122 TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5124 gen_helper_rpcit(cpu_env, r1, r2);
5125 tcg_temp_free_i32(r1);
5126 tcg_temp_free_i32(r2);
5127 set_cc_static(s);
5128 return DISAS_NEXT;
5131 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5133 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5134 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
5135 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5137 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5138 tcg_temp_free_i32(ar);
5139 tcg_temp_free_i32(r1);
5140 tcg_temp_free_i32(r3);
5141 set_cc_static(s);
5142 return DISAS_NEXT;
5145 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5147 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5148 TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5150 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5151 tcg_temp_free_i32(ar);
5152 tcg_temp_free_i32(r1);
5153 set_cc_static(s);
5154 return DISAS_NEXT;
5156 #endif
5158 #include "translate_vx.c.inc"
5160 /* ====================================================================== */
5161 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5162 the original inputs), update the various cc data structures in order to
5163 be able to compute the new condition code. */
5165 static void cout_abs32(DisasContext *s, DisasOps *o)
5167 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5170 static void cout_abs64(DisasContext *s, DisasOps *o)
5172 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5175 static void cout_adds32(DisasContext *s, DisasOps *o)
5177 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5180 static void cout_adds64(DisasContext *s, DisasOps *o)
5182 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5185 static void cout_addu32(DisasContext *s, DisasOps *o)
5187 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
5190 static void cout_addu64(DisasContext *s, DisasOps *o)
5192 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
5195 static void cout_addc32(DisasContext *s, DisasOps *o)
5197 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
5200 static void cout_addc64(DisasContext *s, DisasOps *o)
5202 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5205 static void cout_cmps32(DisasContext *s, DisasOps *o)
5207 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5210 static void cout_cmps64(DisasContext *s, DisasOps *o)
5212 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5215 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5217 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5220 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5222 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5225 static void cout_f32(DisasContext *s, DisasOps *o)
5227 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5230 static void cout_f64(DisasContext *s, DisasOps *o)
5232 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5235 static void cout_f128(DisasContext *s, DisasOps *o)
5237 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5240 static void cout_nabs32(DisasContext *s, DisasOps *o)
5242 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5245 static void cout_nabs64(DisasContext *s, DisasOps *o)
5247 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5250 static void cout_neg32(DisasContext *s, DisasOps *o)
5252 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5255 static void cout_neg64(DisasContext *s, DisasOps *o)
5257 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5260 static void cout_nz32(DisasContext *s, DisasOps *o)
5262 tcg_gen_ext32u_i64(cc_dst, o->out);
5263 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5266 static void cout_nz64(DisasContext *s, DisasOps *o)
5268 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5271 static void cout_s32(DisasContext *s, DisasOps *o)
5273 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5276 static void cout_s64(DisasContext *s, DisasOps *o)
5278 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5281 static void cout_subs32(DisasContext *s, DisasOps *o)
5283 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5286 static void cout_subs64(DisasContext *s, DisasOps *o)
5288 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5291 static void cout_subu32(DisasContext *s, DisasOps *o)
5293 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5296 static void cout_subu64(DisasContext *s, DisasOps *o)
5298 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5301 static void cout_subb32(DisasContext *s, DisasOps *o)
5303 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5306 static void cout_subb64(DisasContext *s, DisasOps *o)
5308 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5311 static void cout_tm32(DisasContext *s, DisasOps *o)
5313 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5316 static void cout_tm64(DisasContext *s, DisasOps *o)
5318 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5321 static void cout_muls32(DisasContext *s, DisasOps *o)
5323 gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5326 static void cout_muls64(DisasContext *s, DisasOps *o)
5328     /* out contains "high" part, out2 contains "low" part of 128-bit result */
5329 gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5332 /* ====================================================================== */
5333 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5334 with the TCG register to which we will write. Used in combination with
5335 the "wout" generators, in some cases we need a new temporary, and in
5336 some cases we can write to a TCG global. */
5338 static void prep_new(DisasContext *s, DisasOps *o)
5340 o->out = tcg_temp_new_i64();
5342 #define SPEC_prep_new 0
5344 static void prep_new_P(DisasContext *s, DisasOps *o)
5346 o->out = tcg_temp_new_i64();
5347 o->out2 = tcg_temp_new_i64();
5349 #define SPEC_prep_new_P 0
5351 static void prep_r1(DisasContext *s, DisasOps *o)
5353 o->out = regs[get_field(s, r1)];
5354 o->g_out = true;
5356 #define SPEC_prep_r1 0
5358 static void prep_r1_P(DisasContext *s, DisasOps *o)
5360 int r1 = get_field(s, r1);
5361 o->out = regs[r1];
5362 o->out2 = regs[r1 + 1];
5363 o->g_out = o->g_out2 = true;
5365 #define SPEC_prep_r1_P SPEC_r1_even
5367 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5368 static void prep_x1(DisasContext *s, DisasOps *o)
5370 o->out = load_freg(get_field(s, r1));
5371 o->out2 = load_freg(get_field(s, r1) + 2);
5373 #define SPEC_prep_x1 SPEC_r1_f128
5375 /* ====================================================================== */
5376 /* The "Write OUTput" generators. These generally perform some non-trivial
5377 copy of data to TCG globals, or to main memory. The trivial cases are
5378 generally handled by having a "prep" generator install the TCG global
5379 as the destination of the operation. */
5381 static void wout_r1(DisasContext *s, DisasOps *o)
5383 store_reg(get_field(s, r1), o->out);
5385 #define SPEC_wout_r1 0
5387 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5389 store_reg(get_field(s, r1), o->out2);
5391 #define SPEC_wout_out2_r1 0
5393 static void wout_r1_8(DisasContext *s, DisasOps *o)
5395 int r1 = get_field(s, r1);
5396 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5398 #define SPEC_wout_r1_8 0
5400 static void wout_r1_16(DisasContext *s, DisasOps *o)
5402 int r1 = get_field(s, r1);
5403 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5405 #define SPEC_wout_r1_16 0
5407 static void wout_r1_32(DisasContext *s, DisasOps *o)
5409 store_reg32_i64(get_field(s, r1), o->out);
5411 #define SPEC_wout_r1_32 0
5413 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5415 store_reg32h_i64(get_field(s, r1), o->out);
5417 #define SPEC_wout_r1_32h 0
5419 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5421 int r1 = get_field(s, r1);
5422 store_reg32_i64(r1, o->out);
5423 store_reg32_i64(r1 + 1, o->out2);
5425 #define SPEC_wout_r1_P32 SPEC_r1_even
5427 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5429 int r1 = get_field(s, r1);
5430 store_reg32_i64(r1 + 1, o->out);
5431 tcg_gen_shri_i64(o->out, o->out, 32);
5432 store_reg32_i64(r1, o->out);
5434 #define SPEC_wout_r1_D32 SPEC_r1_even
5436 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5438 int r3 = get_field(s, r3);
5439 store_reg32_i64(r3, o->out);
5440 store_reg32_i64(r3 + 1, o->out2);
5442 #define SPEC_wout_r3_P32 SPEC_r3_even
5444 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5446 int r3 = get_field(s, r3);
5447 store_reg(r3, o->out);
5448 store_reg(r3 + 1, o->out2);
5450 #define SPEC_wout_r3_P64 SPEC_r3_even
5452 static void wout_e1(DisasContext *s, DisasOps *o)
5454 store_freg32_i64(get_field(s, r1), o->out);
5456 #define SPEC_wout_e1 0
5458 static void wout_f1(DisasContext *s, DisasOps *o)
5460 store_freg(get_field(s, r1), o->out);
5462 #define SPEC_wout_f1 0
5464 static void wout_x1(DisasContext *s, DisasOps *o)
5466 int f1 = get_field(s, r1);
5467 store_freg(f1, o->out);
5468 store_freg(f1 + 2, o->out2);
5470 #define SPEC_wout_x1 SPEC_r1_f128
5472 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5474 if (get_field(s, r1) != get_field(s, r2)) {
5475 store_reg32_i64(get_field(s, r1), o->out);
5478 #define SPEC_wout_cond_r1r2_32 0
5480 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5482 if (get_field(s, r1) != get_field(s, r2)) {
5483 store_freg32_i64(get_field(s, r1), o->out);
5486 #define SPEC_wout_cond_e1e2 0
5488 static void wout_m1_8(DisasContext *s, DisasOps *o)
5490 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5492 #define SPEC_wout_m1_8 0
5494 static void wout_m1_16(DisasContext *s, DisasOps *o)
5496 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5498 #define SPEC_wout_m1_16 0
5500 #ifndef CONFIG_USER_ONLY
5501 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5503 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5505 #define SPEC_wout_m1_16a 0
5506 #endif
5508 static void wout_m1_32(DisasContext *s, DisasOps *o)
5510 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5512 #define SPEC_wout_m1_32 0
5514 #ifndef CONFIG_USER_ONLY
5515 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5517 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5519 #define SPEC_wout_m1_32a 0
5520 #endif
5522 static void wout_m1_64(DisasContext *s, DisasOps *o)
5524 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5526 #define SPEC_wout_m1_64 0
5528 #ifndef CONFIG_USER_ONLY
5529 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5531 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5533 #define SPEC_wout_m1_64a 0
5534 #endif
5536 static void wout_m2_32(DisasContext *s, DisasOps *o)
5538 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5540 #define SPEC_wout_m2_32 0
5542 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5544 store_reg(get_field(s, r1), o->in2);
5546 #define SPEC_wout_in2_r1 0
5548 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5550 store_reg32_i64(get_field(s, r1), o->in2);
5552 #define SPEC_wout_in2_r1_32 0
5554 /* ====================================================================== */
5555 /* The "INput 1" generators. These load the first operand to an insn. */
5557 static void in1_r1(DisasContext *s, DisasOps *o)
5559 o->in1 = load_reg(get_field(s, r1));
5561 #define SPEC_in1_r1 0
5563 static void in1_r1_o(DisasContext *s, DisasOps *o)
5565 o->in1 = regs[get_field(s, r1)];
5566 o->g_in1 = true;
5568 #define SPEC_in1_r1_o 0
5570 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5572 o->in1 = tcg_temp_new_i64();
5573 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5575 #define SPEC_in1_r1_32s 0
5577 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5579 o->in1 = tcg_temp_new_i64();
5580 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5582 #define SPEC_in1_r1_32u 0
5584 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5586 o->in1 = tcg_temp_new_i64();
5587 tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5589 #define SPEC_in1_r1_sr32 0
5591 static void in1_r1p1(DisasContext *s, DisasOps *o)
5593 o->in1 = load_reg(get_field(s, r1) + 1);
5595 #define SPEC_in1_r1p1 SPEC_r1_even
5597 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5599 o->in1 = regs[get_field(s, r1) + 1];
5600 o->g_in1 = true;
5602 #define SPEC_in1_r1p1_o SPEC_r1_even
5604 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5606 o->in1 = tcg_temp_new_i64();
5607 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5609 #define SPEC_in1_r1p1_32s SPEC_r1_even
5611 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5613 o->in1 = tcg_temp_new_i64();
5614 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5616 #define SPEC_in1_r1p1_32u SPEC_r1_even
5618 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5620 int r1 = get_field(s, r1);
5621 o->in1 = tcg_temp_new_i64();
5622 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5624 #define SPEC_in1_r1_D32 SPEC_r1_even
5626 static void in1_r2(DisasContext *s, DisasOps *o)
5628 o->in1 = load_reg(get_field(s, r2));
5630 #define SPEC_in1_r2 0
5632 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5634 o->in1 = tcg_temp_new_i64();
5635 tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5637 #define SPEC_in1_r2_sr32 0
5639 static void in1_r3(DisasContext *s, DisasOps *o)
5641 o->in1 = load_reg(get_field(s, r3));
5643 #define SPEC_in1_r3 0
5645 static void in1_r3_o(DisasContext *s, DisasOps *o)
5647 o->in1 = regs[get_field(s, r3)];
5648 o->g_in1 = true;
5650 #define SPEC_in1_r3_o 0
5652 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5654 o->in1 = tcg_temp_new_i64();
5655 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5657 #define SPEC_in1_r3_32s 0
5659 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5661 o->in1 = tcg_temp_new_i64();
5662 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5664 #define SPEC_in1_r3_32u 0
5666 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5668 int r3 = get_field(s, r3);
5669 o->in1 = tcg_temp_new_i64();
5670 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5672 #define SPEC_in1_r3_D32 SPEC_r3_even
5674 static void in1_e1(DisasContext *s, DisasOps *o)
5676 o->in1 = load_freg32_i64(get_field(s, r1));
5678 #define SPEC_in1_e1 0
5680 static void in1_f1(DisasContext *s, DisasOps *o)
5682 o->in1 = load_freg(get_field(s, r1));
5684 #define SPEC_in1_f1 0
5686 /* Load the high double word of an extended (128-bit) format FP number */
5687 static void in1_x2h(DisasContext *s, DisasOps *o)
5689 o->in1 = load_freg(get_field(s, r2));
5691 #define SPEC_in1_x2h SPEC_r2_f128
5693 static void in1_f3(DisasContext *s, DisasOps *o)
5695 o->in1 = load_freg(get_field(s, r3));
5697 #define SPEC_in1_f3 0
5699 static void in1_la1(DisasContext *s, DisasOps *o)
5701 o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5703 #define SPEC_in1_la1 0
5705 static void in1_la2(DisasContext *s, DisasOps *o)
5707 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5708 o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5710 #define SPEC_in1_la2 0
5712 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5714 in1_la1(s, o);
5715 o->in1 = tcg_temp_new_i64();
5716 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5718 #define SPEC_in1_m1_8u 0
5720 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5722 in1_la1(s, o);
5723 o->in1 = tcg_temp_new_i64();
5724 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5726 #define SPEC_in1_m1_16s 0
5728 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5730 in1_la1(s, o);
5731 o->in1 = tcg_temp_new_i64();
5732 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5734 #define SPEC_in1_m1_16u 0
5736 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5738 in1_la1(s, o);
5739 o->in1 = tcg_temp_new_i64();
5740 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5742 #define SPEC_in1_m1_32s 0
5744 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5746 in1_la1(s, o);
5747 o->in1 = tcg_temp_new_i64();
5748 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5750 #define SPEC_in1_m1_32u 0
5752 static void in1_m1_64(DisasContext *s, DisasOps *o)
5754 in1_la1(s, o);
5755 o->in1 = tcg_temp_new_i64();
5756 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5758 #define SPEC_in1_m1_64 0
5760 /* ====================================================================== */
5761 /* The "INput 2" generators. These load the second operand to an insn. */
5763 static void in2_r1_o(DisasContext *s, DisasOps *o)
5765 o->in2 = regs[get_field(s, r1)];
5766 o->g_in2 = true;
5768 #define SPEC_in2_r1_o 0
5770 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5772 o->in2 = tcg_temp_new_i64();
5773 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5775 #define SPEC_in2_r1_16u 0
5777 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5779 o->in2 = tcg_temp_new_i64();
5780 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5782 #define SPEC_in2_r1_32u 0
5784 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5786 int r1 = get_field(s, r1);
5787 o->in2 = tcg_temp_new_i64();
5788 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5790 #define SPEC_in2_r1_D32 SPEC_r1_even
5792 static void in2_r2(DisasContext *s, DisasOps *o)
5794 o->in2 = load_reg(get_field(s, r2));
5796 #define SPEC_in2_r2 0
5798 static void in2_r2_o(DisasContext *s, DisasOps *o)
5800 o->in2 = regs[get_field(s, r2)];
5801 o->g_in2 = true;
5803 #define SPEC_in2_r2_o 0
5805 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5807 int r2 = get_field(s, r2);
5808 if (r2 != 0) {
5809 o->in2 = load_reg(r2);
5812 #define SPEC_in2_r2_nz 0
5814 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5816 o->in2 = tcg_temp_new_i64();
5817 tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5819 #define SPEC_in2_r2_8s 0
5821 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5823 o->in2 = tcg_temp_new_i64();
5824 tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5826 #define SPEC_in2_r2_8u 0
5828 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5830 o->in2 = tcg_temp_new_i64();
5831 tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5833 #define SPEC_in2_r2_16s 0
5835 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5837 o->in2 = tcg_temp_new_i64();
5838 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5840 #define SPEC_in2_r2_16u 0
5842 static void in2_r3(DisasContext *s, DisasOps *o)
5844 o->in2 = load_reg(get_field(s, r3));
5846 #define SPEC_in2_r3 0
5848 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5850 o->in2 = tcg_temp_new_i64();
5851 tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5853 #define SPEC_in2_r3_sr32 0
5855 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5857 o->in2 = tcg_temp_new_i64();
5858 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5860 #define SPEC_in2_r3_32u 0
5862 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5864 o->in2 = tcg_temp_new_i64();
5865 tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5867 #define SPEC_in2_r2_32s 0
5869 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5871 o->in2 = tcg_temp_new_i64();
5872 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5874 #define SPEC_in2_r2_32u 0
5876 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5878 o->in2 = tcg_temp_new_i64();
5879 tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5881 #define SPEC_in2_r2_sr32 0
5883 static void in2_e2(DisasContext *s, DisasOps *o)
5885 o->in2 = load_freg32_i64(get_field(s, r2));
5887 #define SPEC_in2_e2 0
5889 static void in2_f2(DisasContext *s, DisasOps *o)
5891 o->in2 = load_freg(get_field(s, r2));
5893 #define SPEC_in2_f2 0
5895 /* Load the low double word of an extended (128-bit) format FP number */
5896 static void in2_x2l(DisasContext *s, DisasOps *o)
5898 o->in2 = load_freg(get_field(s, r2) + 2);
5900 #define SPEC_in2_x2l SPEC_r2_f128
5902 static void in2_ra2(DisasContext *s, DisasOps *o)
5904 o->in2 = get_address(s, 0, get_field(s, r2), 0);
5906 #define SPEC_in2_ra2 0
5908 static void in2_a2(DisasContext *s, DisasOps *o)
5910 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5911 o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5913 #define SPEC_in2_a2 0
5915 static void in2_ri2(DisasContext *s, DisasOps *o)
5917 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
5919 #define SPEC_in2_ri2 0
5921 static void in2_sh32(DisasContext *s, DisasOps *o)
5923 help_l2_shift(s, o, 31);
5925 #define SPEC_in2_sh32 0
5927 static void in2_sh64(DisasContext *s, DisasOps *o)
5929 help_l2_shift(s, o, 63);
5931 #define SPEC_in2_sh64 0
5933 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5935 in2_a2(s, o);
5936 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5938 #define SPEC_in2_m2_8u 0
5940 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5942 in2_a2(s, o);
5943 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5945 #define SPEC_in2_m2_16s 0
5947 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5949 in2_a2(s, o);
5950 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5952 #define SPEC_in2_m2_16u 0
5954 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5956 in2_a2(s, o);
5957 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5959 #define SPEC_in2_m2_32s 0
5961 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5963 in2_a2(s, o);
5964 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5966 #define SPEC_in2_m2_32u 0
5968 #ifndef CONFIG_USER_ONLY
5969 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5971 in2_a2(s, o);
5972 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5974 #define SPEC_in2_m2_32ua 0
5975 #endif
5977 static void in2_m2_64(DisasContext *s, DisasOps *o)
5979 in2_a2(s, o);
5980 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5982 #define SPEC_in2_m2_64 0
5984 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5986 in2_a2(s, o);
5987 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5988 gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5990 #define SPEC_in2_m2_64w 0
5992 #ifndef CONFIG_USER_ONLY
5993 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5995 in2_a2(s, o);
5996 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5998 #define SPEC_in2_m2_64a 0
5999 #endif
6001 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
6003 in2_ri2(s, o);
6004 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6006 #define SPEC_in2_mri2_16u 0
6008 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
6010 in2_ri2(s, o);
6011 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6013 #define SPEC_in2_mri2_32s 0
6015 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
6017 in2_ri2(s, o);
6018 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6020 #define SPEC_in2_mri2_32u 0
6022 static void in2_mri2_64(DisasContext *s, DisasOps *o)
6024 in2_ri2(s, o);
6025 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6027 #define SPEC_in2_mri2_64 0
6029 static void in2_i2(DisasContext *s, DisasOps *o)
6031 o->in2 = tcg_const_i64(get_field(s, i2));
6033 #define SPEC_in2_i2 0
6035 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6037 o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6039 #define SPEC_in2_i2_8u 0
6041 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6043 o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6045 #define SPEC_in2_i2_16u 0
6047 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6049 o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6051 #define SPEC_in2_i2_32u 0
6053 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6055 uint64_t i2 = (uint16_t)get_field(s, i2);
6056 o->in2 = tcg_const_i64(i2 << s->insn->data);
6058 #define SPEC_in2_i2_16u_shl 0
6060 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6062 uint64_t i2 = (uint32_t)get_field(s, i2);
6063 o->in2 = tcg_const_i64(i2 << s->insn->data);
6065 #define SPEC_in2_i2_32u_shl 0
6067 #ifndef CONFIG_USER_ONLY
6068 static void in2_insn(DisasContext *s, DisasOps *o)
6070 o->in2 = tcg_const_i64(s->fields.raw_insn);
6072 #define SPEC_in2_insn 0
6073 #endif
6075 /* ====================================================================== */
6077 /* Find opc within the table of insns. This is formulated as a switch
6078 statement so that (1) we get compile-time notice of cut-paste errors
6079 for duplicated opcodes, and (2) the compiler generates the binary
6080 search tree, rather than us having to post-process the table. */
6082 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6083 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6085 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6086 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6088 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6089 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6091 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6093 enum DisasInsnEnum {
6094 #include "insn-data.def"
6097 #undef E
6098 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6099 .opc = OPC, \
6100 .flags = FL, \
6101 .fmt = FMT_##FT, \
6102 .fac = FAC_##FC, \
6103 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6104 .name = #NM, \
6105 .help_in1 = in1_##I1, \
6106 .help_in2 = in2_##I2, \
6107 .help_prep = prep_##P, \
6108 .help_wout = wout_##W, \
6109 .help_cout = cout_##CC, \
6110 .help_op = op_##OP, \
6111 .data = D \
6114 /* Allow 0 to be used for NULL in the table below. */
6115 #define in1_0 NULL
6116 #define in2_0 NULL
6117 #define prep_0 NULL
6118 #define wout_0 NULL
6119 #define cout_0 NULL
6120 #define op_0 NULL
6122 #define SPEC_in1_0 0
6123 #define SPEC_in2_0 0
6124 #define SPEC_prep_0 0
6125 #define SPEC_wout_0 0
6127 /* Give smaller names to the various facilities. */
6128 #define FAC_Z S390_FEAT_ZARCH
6129 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6130 #define FAC_DFP S390_FEAT_DFP
6131 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6132 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6133 #define FAC_EE S390_FEAT_EXECUTE_EXT
6134 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6135 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6136 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6137 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6138 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6139 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6140 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6141 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6142 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6143 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6144 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6145 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6146 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6147 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6148 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6149 #define FAC_SFLE S390_FEAT_STFLE
6150 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6151 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6152 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6153 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6154 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6155 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6156 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6157 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6158 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6159 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6160 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6161 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6162 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6163 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6164 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6165 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6166 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6167 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6168 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6169 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6171 static const DisasInsn insn_info[] = {
6172 #include "insn-data.def"
6175 #undef E
6176 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6177 case OPC: return &insn_info[insn_ ## NM];
6179 static const DisasInsn *lookup_opc(uint16_t opc)
6181 switch (opc) {
6182 #include "insn-data.def"
6183 default:
6184 return NULL;
6188 #undef F
6189 #undef E
6190 #undef D
6191 #undef C
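/*
 * To make the triple expansion concrete, a representative entry such as
 * (taking 32-bit ADD as an assumed example)
 *
 *   C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)
 *
 * becomes, in turn, the enumerator insn_AR, a DisasInsn initializer
 * wiring up in1_r1 / in2_r2_32s / prep_new / wout_r1_32 / op_add /
 * cout_adds32, and "case 0x1a00: return &insn_info[insn_AR];" inside
 * lookup_opc.
 */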
6193 /* Extract a field from the insn. The INSN should be left-aligned in
6194 the uint64_t so that we can more easily utilize the big-bit-endian
6195    definitions we extract from the Principles of Operation. */
6197 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6199 uint32_t r, m;
6201 if (f->size == 0) {
6202 return;
6205 /* Zero extract the field from the insn. */
6206 r = (insn << f->beg) >> (64 - f->size);
6208 /* Sign-extend, or un-swap the field as necessary. */
6209 switch (f->type) {
6210 case 0: /* unsigned */
6211 break;
6212 case 1: /* signed */
6213 assert(f->size <= 32);
6214 m = 1u << (f->size - 1);
6215 r = (r ^ m) - m;
6216 break;
6217 case 2: /* dl+dh split, signed 20 bit. */
6218 r = ((int8_t)r << 12) | (r >> 8);
6219 break;
6220 case 3: /* MSB stored in RXB */
6221 g_assert(f->size == 4);
6222 switch (f->beg) {
6223 case 8:
6224 r |= extract64(insn, 63 - 36, 1) << 4;
6225 break;
6226 case 12:
6227 r |= extract64(insn, 63 - 37, 1) << 4;
6228 break;
6229 case 16:
6230 r |= extract64(insn, 63 - 38, 1) << 4;
6231 break;
6232 case 32:
6233 r |= extract64(insn, 63 - 39, 1) << 4;
6234 break;
6235 default:
6236 g_assert_not_reached();
6238 break;
6239 default:
6240 abort();
6243 /* Validate that the "compressed" encoding we selected above is valid.
6244        I.e. we haven't made two different original fields overlap. */
6245 assert(((o->presentC >> f->indexC) & 1) == 0);
6246 o->presentC |= 1 << f->indexC;
6247 o->presentO |= 1 << f->indexO;
6249 o->c[f->indexC] = r;
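/*
 * Worked example for field type 2 above: a 20-bit long displacement is
 * encoded as DL (12 bits) followed by DH (8 bits), so the generic
 * extraction leaves r = DL:DH. ((int8_t)r << 12) sign-extends DH and
 * shifts it above DL, (r >> 8) recovers DL, and the or yields the
 * signed 20-bit value DH:DL.
 */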
6252 /* Look up the insn at the current PC, extracting the operands into O and
6253 returning the info struct for the insn. Returns NULL for invalid insn. */
6255 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6257 uint64_t insn, pc = s->base.pc_next;
6258 int op, op2, ilen;
6259 const DisasInsn *info;
6261 if (unlikely(s->ex_value)) {
6262 /* Drop the EX data now, so that it's clear on exception paths. */
6263 TCGv_i64 zero = tcg_const_i64(0);
6264 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6265 tcg_temp_free_i64(zero);
6267 /* Extract the values saved by EXECUTE. */
6268 insn = s->ex_value & 0xffffffffffff0000ull;
6269 ilen = s->ex_value & 0xf;
6270 op = insn >> 56;
6271 } else {
6272 insn = ld_code2(env, pc);
6273 op = (insn >> 8) & 0xff;
6274 ilen = get_ilen(op);
6275 switch (ilen) {
6276 case 2:
6277 insn = insn << 48;
6278 break;
6279 case 4:
6280 insn = ld_code4(env, pc) << 32;
6281 break;
6282 case 6:
6283 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6284 break;
6285 default:
6286 g_assert_not_reached();
6289 s->pc_tmp = s->base.pc_next + ilen;
6290 s->ilen = ilen;
6292 /* We can't actually determine the insn format until we've looked up
6293       the full insn opcode, which we can't do without locating the
6294 secondary opcode. Assume by default that OP2 is at bit 40; for
6295 those smaller insns that don't actually have a secondary opcode
6296 this will correctly result in OP2 = 0. */
6297 switch (op) {
6298 case 0x01: /* E */
6299 case 0x80: /* S */
6300 case 0x82: /* S */
6301 case 0x93: /* S */
6302 case 0xb2: /* S, RRF, RRE, IE */
6303 case 0xb3: /* RRE, RRD, RRF */
6304 case 0xb9: /* RRE, RRF */
6305 case 0xe5: /* SSE, SIL */
6306 op2 = (insn << 8) >> 56;
6307 break;
6308 case 0xa5: /* RI */
6309 case 0xa7: /* RI */
6310 case 0xc0: /* RIL */
6311 case 0xc2: /* RIL */
6312 case 0xc4: /* RIL */
6313 case 0xc6: /* RIL */
6314 case 0xc8: /* SSF */
6315 case 0xcc: /* RIL */
6316 op2 = (insn << 12) >> 60;
6317 break;
6318 case 0xc5: /* MII */
6319 case 0xc7: /* SMI */
6320 case 0xd0 ... 0xdf: /* SS */
6321 case 0xe1: /* SS */
6322 case 0xe2: /* SS */
6323 case 0xe8: /* SS */
6324 case 0xe9: /* SS */
6325 case 0xea: /* SS */
6326 case 0xee ... 0xf3: /* SS */
6327 case 0xf8 ... 0xfd: /* SS */
6328 op2 = 0;
6329 break;
6330 default:
6331 op2 = (insn << 40) >> 56;
6332 break;
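/*
 * Since the insn was left-aligned in the 64-bit word, each shift pair
 * above is a plain bit-field extract: (insn << 8) >> 56 takes bits 8-15
 * (the second byte), (insn << 12) >> 60 the 4-bit op2 nibble of the
 * RI/RIL formats, and the default (insn << 40) >> 56 the byte at bit
 * position 40 mentioned in the comment before the switch.
 */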
6335 memset(&s->fields, 0, sizeof(s->fields));
6336 s->fields.raw_insn = insn;
6337 s->fields.op = op;
6338 s->fields.op2 = op2;
6340 /* Lookup the instruction. */
6341 info = lookup_opc(op << 8 | op2);
6342 s->insn = info;
6344 /* If we found it, extract the operands. */
6345 if (info != NULL) {
6346 DisasFormat fmt = info->fmt;
6347 int i;
6349 for (i = 0; i < NUM_C_FIELD; ++i) {
6350 extract_field(&s->fields, &format_info[fmt].op[i], insn);
6353 return info;
6356 static bool is_afp_reg(int reg)
6358 return reg % 2 || reg > 6;
6361 static bool is_fp_pair(int reg)
6363     /* 0,1,4,5,8,9,12,13: to exclude the others, check that bit 1 is clear */
6364 return !(reg & 0x2);
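/*
 * A 128-bit FP value lives in the register pair f[n] / f[n + 2], so the
 * valid first registers are exactly those with bit 1 clear; e.g. r1 = 4
 * names f4/f6, while r1 = 2 fails the check and raises a specification
 * exception via SPEC_r1_f128 below.
 */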
6367 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6369 const DisasInsn *insn;
6370 DisasJumpType ret = DISAS_NEXT;
6371 DisasOps o = {};
6373 /* Search for the insn in the table. */
6374 insn = extract_insn(env, s);
6376 /* Emit insn_start now that we know the ILEN. */
6377 tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);
6379 /* Not found means unimplemented/illegal opcode. */
6380 if (insn == NULL) {
6381 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6382 s->fields.op, s->fields.op2);
6383 gen_illegal_opcode(s);
6384 return DISAS_NORETURN;
6387 #ifndef CONFIG_USER_ONLY
6388 if (s->base.tb->flags & FLAG_MASK_PER) {
6389 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6390 gen_helper_per_ifetch(cpu_env, addr);
6391 tcg_temp_free_i64(addr);
6393 #endif
6395 /* process flags */
6396 if (insn->flags) {
6397 /* privileged instruction */
6398 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6399 gen_program_exception(s, PGM_PRIVILEGED);
6400 return DISAS_NORETURN;
6403 /* if AFP is not enabled, instructions and registers are forbidden */
6404 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6405 uint8_t dxc = 0;
6407 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6408 dxc = 1;
6410 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6411 dxc = 1;
6413 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6414 dxc = 1;
6416 if (insn->flags & IF_BFP) {
6417 dxc = 2;
6419 if (insn->flags & IF_DFP) {
6420 dxc = 3;
6422 if (insn->flags & IF_VEC) {
6423 dxc = 0xfe;
6425 if (dxc) {
6426 gen_data_exception(dxc);
6427 return DISAS_NORETURN;
6431     /* if vector instructions are not enabled, executing them is forbidden */
6432 if (insn->flags & IF_VEC) {
6433 if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6434 gen_data_exception(0xfe);
6435 return DISAS_NORETURN;
6440 /* Check for insn specification exceptions. */
6441 if (insn->spec) {
6442 if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6443 (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6444 (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6445 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6446 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6447 gen_program_exception(s, PGM_SPECIFICATION);
6448 return DISAS_NORETURN;
6452 /* Implement the instruction. */
6453 if (insn->help_in1) {
6454 insn->help_in1(s, &o);
6456 if (insn->help_in2) {
6457 insn->help_in2(s, &o);
6459 if (insn->help_prep) {
6460 insn->help_prep(s, &o);
6462 if (insn->help_op) {
6463 ret = insn->help_op(s, &o);
6465 if (ret != DISAS_NORETURN) {
6466 if (insn->help_wout) {
6467 insn->help_wout(s, &o);
6469 if (insn->help_cout) {
6470 insn->help_cout(s, &o);
6474 /* Free any temporaries created by the helpers. */
6475 if (o.out && !o.g_out) {
6476 tcg_temp_free_i64(o.out);
6478 if (o.out2 && !o.g_out2) {
6479 tcg_temp_free_i64(o.out2);
6481 if (o.in1 && !o.g_in1) {
6482 tcg_temp_free_i64(o.in1);
6484 if (o.in2 && !o.g_in2) {
6485 tcg_temp_free_i64(o.in2);
6487 if (o.addr1) {
6488 tcg_temp_free_i64(o.addr1);
6491 #ifndef CONFIG_USER_ONLY
6492 if (s->base.tb->flags & FLAG_MASK_PER) {
6493 /* An exception might be triggered, save PSW if not already done. */
6494 if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6495 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6498 /* Call the helper to check for a possible PER exception. */
6499 gen_helper_per_check_exception(cpu_env);
6501 #endif
6503 /* Advance to the next instruction. */
6504 s->base.pc_next = s->pc_tmp;
6505 return ret;
6508 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6510 DisasContext *dc = container_of(dcbase, DisasContext, base);
6512 /* 31-bit mode */
6513 if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6514 dc->base.pc_first &= 0x7fffffff;
6515 dc->base.pc_next = dc->base.pc_first;
6518 dc->cc_op = CC_OP_DYNAMIC;
6519 dc->ex_value = dc->base.tb->cs_base;
6520 dc->do_debug = dc->base.singlestep_enabled;
6523 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6527 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6531 static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6532 const CPUBreakpoint *bp)
6534 DisasContext *dc = container_of(dcbase, DisasContext, base);
6537 * Emit an insn_start to accompany the breakpoint exception.
6538 * The ILEN value is a dummy, since this does not result in
6539 * an s390x exception, but an internal qemu exception which
6540 * brings us back to interact with the gdbstub.
6542 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);
6544 dc->base.is_jmp = DISAS_PC_STALE;
6545 dc->do_debug = true;
6546 /* The address covered by the breakpoint must be included in
6547        [tb->pc, tb->pc + tb->size) in order for it to be
6548 properly cleared -- thus we increment the PC here so that
6549 the logic setting tb->size does the right thing. */
6550 dc->base.pc_next += 2;
6551 return true;
6554 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6556 CPUS390XState *env = cs->env_ptr;
6557 DisasContext *dc = container_of(dcbase, DisasContext, base);
6559 dc->base.is_jmp = translate_one(env, dc);
6560 if (dc->base.is_jmp == DISAS_NEXT) {
6561 uint64_t page_start;
6563 page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6564 if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6565 dc->base.is_jmp = DISAS_TOO_MANY;
6570 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6572 DisasContext *dc = container_of(dcbase, DisasContext, base);
6574 switch (dc->base.is_jmp) {
6575 case DISAS_GOTO_TB:
6576 case DISAS_NORETURN:
6577 break;
6578 case DISAS_TOO_MANY:
6579 case DISAS_PC_STALE:
6580 case DISAS_PC_STALE_NOCHAIN:
6581 update_psw_addr(dc);
6582 /* FALLTHRU */
6583 case DISAS_PC_UPDATED:
6584 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6585 cc op type is in env */
6586 update_cc_op(dc);
6587 /* FALLTHRU */
6588 case DISAS_PC_CC_UPDATED:
6589 /* Exit the TB, either by raising a debug exception or by return. */
6590 if (dc->do_debug) {
6591 gen_exception(EXCP_DEBUG);
6592 } else if (use_exit_tb(dc) ||
6593 dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
6594 tcg_gen_exit_tb(NULL, 0);
6595 } else {
6596 tcg_gen_lookup_and_goto_ptr();
6598 break;
6599 default:
6600 g_assert_not_reached();
6604 static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6606 DisasContext *dc = container_of(dcbase, DisasContext, base);
6608 if (unlikely(dc->ex_value)) {
6609 /* ??? Unfortunately log_target_disas can't use host memory. */
6610 qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6611 } else {
6612 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6613 log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6617 static const TranslatorOps s390x_tr_ops = {
6618 .init_disas_context = s390x_tr_init_disas_context,
6619 .tb_start = s390x_tr_tb_start,
6620 .insn_start = s390x_tr_insn_start,
6621 .breakpoint_check = s390x_tr_breakpoint_check,
6622 .translate_insn = s390x_tr_translate_insn,
6623 .tb_stop = s390x_tr_tb_stop,
6624 .disas_log = s390x_tr_disas_log,
6627 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
6629 DisasContext dc;
6631 translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
6634 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6635 target_ulong *data)
6637 int cc_op = data[1];
6639 env->psw.addr = data[0];
6641 /* Update the CC opcode if it is not already up-to-date. */
6642 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6643 env->cc_op = cc_op;
6646 /* Record ILEN. */
6647 env->int_pgm_ilen = data[2];