/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
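/*
 * Example: in 31-bit mode (FLAG_MASK_32 set, FLAG_MASK_64 clear), a
 * return address of 0x1000 yields the link value 0x80001000, since the
 * top bit of the 32-bit link value carries the addressing-mode bit, and
 * only the low 32 bits of the output register are replaced above.
 */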
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp size)
{
    const uint8_t es = 1 << size;
    int offs = enr * es;

    g_assert(reg < 32);
    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(size <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - es);
#endif
    return offs + offsetof(CPUS390XState, vregs[reg][0].d);
}
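/*
 * Worked example, assuming a little-endian host: for MO_8, element 0
 * maps to offs = 0 ^ (8 - 1) = 7, i.e. host byte 7 of the low
 * doubleword, matching the byte row of the table above; for MO_64 the
 * XOR is with 0 and element numbers map straight through.
 */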
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->base.tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
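/*
 * Example: a D2(X2,B2) operand with b2 == 0 and x2 == 0 is a plain
 * immediate; in 24/31-bit mode the 31-bit wrap is folded into that
 * immediate (d2 &= 0x7fffffff), so no extra masking op is emitted.
 */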
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
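/*
 * Direct chaining (goto_tb) is only allowed to targets on the same
 * guest page as the TB start or the current instruction, so that
 * page-level TB invalidation can safely unlink chained blocks.
 */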
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
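/*
 * Example: a branch mask of 8 selects CC=0 only; for a signed comparison
 * that means "equal", so ltgt_cond[8] == TCG_COND_EQ.  Mask bits 8/4/2/1
 * correspond to CC values 0/1/2/3 respectively.
 */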
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
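/*
 * Example: FLD_C_d1 and FLD_C_i5 share compact slot 4 because no
 * instruction format carries both a d1 and an i5 field at once; the
 * "O" indices still tell them apart in the presentO bitmap.
 */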
struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
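/*
 * Example: get_field(s->fields, r1) expands to
 *   get_field1(s->fields, FLD_O_r1, FLD_C_r1)
 * where the "O" index feeds the presence assertion and the "C" index
 * picks the compact slot out of f->c[].
 */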
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
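/*
 * Example: BXD(2) lays out a base-index-displacement operand with b2 in
 * bits 16-19, x2 in bits 12-15 and an unsigned 12-bit d2 in bits 20-31,
 * while the BDL/BXDL variants carry a 20-bit long displacement
 * (field type 2) instead.
 */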
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
1130 /* Instructions can place constraints on their operands, raising specification
1131 exceptions if they are violated. To make this easy to automate, each "in1",
1132 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1133 of the following, or 0. To make this easy to document, we'll put the
1134 SPEC_<name> defines next to <name>. */
1136 #define SPEC_r1_even 1
1137 #define SPEC_r2_even 2
1138 #define SPEC_r3_even 4
1139 #define SPEC_r1_f128 8
1140 #define SPEC_r2_f128 16
1142 /* Return values from translate_one, indicating the state of the TB. */
1144 /* We are not using a goto_tb (for whatever reason), but have updated
1145 the PC (for whatever reason), so there's no need to do it again on
1146 exiting the TB. */
1147 #define DISAS_PC_UPDATED DISAS_TARGET_0
1149 /* We have emitted one or more goto_tb. No fixup required. */
1150 #define DISAS_GOTO_TB DISAS_TARGET_1
1152 /* We have updated the PC and CC values. */
1153 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1155 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1156 updated the PC for the next instruction to be executed. */
1157 #define DISAS_PC_STALE DISAS_TARGET_3
1159 /* We are exiting the TB to the main loop. */
1160 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1163 /* Instruction flags */
1164 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1165 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1166 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1167 #define IF_BFP 0x0008 /* binary floating point instruction */
1168 #define IF_DFP 0x0010 /* decimal floating point instruction */
1169 #define IF_PRIV 0x0020 /* privileged instruction */
1171 struct DisasInsn {
1172 unsigned opc:16;
1173 unsigned flags:16;
1174 DisasFormat fmt:8;
1175 unsigned fac:8;
1176 unsigned spec:8;
1178 const char *name;
1180 /* Pre-process arguments before HELP_OP. */
1181 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1182 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1183 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1186 * Post-process output after HELP_OP.
1187 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1189 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1190 void (*help_cout)(DisasContext *, DisasOps *);
1192 /* Implement the operation itself. */
1193 DisasJumpType (*help_op)(DisasContext *, DisasOps *);
1195 uint64_t data;
1198 /* ====================================================================== */
1199 /* Miscellaneous helpers, used by several operations. */
1201 static void help_l2_shift(DisasContext *s, DisasFields *f,
1202 DisasOps *o, int mask)
1204 int b2 = get_field(f, b2);
1205 int d2 = get_field(f, d2);
1207 if (b2 == 0) {
1208 o->in2 = tcg_const_i64(d2 & mask);
1209 } else {
1210 o->in2 = get_address(s, 0, b2, d2);
1211 tcg_gen_andi_i64(o->in2, o->in2, mask);
1215 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1217 if (dest == s->pc_tmp) {
1218 per_branch(s, true);
1219 return DISAS_NEXT;
1221 if (use_goto_tb(s, dest)) {
1222 update_cc_op(s);
1223 per_breaking_event(s);
1224 tcg_gen_goto_tb(0);
1225 tcg_gen_movi_i64(psw_addr, dest);
1226 tcg_gen_exit_tb(s->base.tb, 0);
1227 return DISAS_GOTO_TB;
1228 } else {
1229 tcg_gen_movi_i64(psw_addr, dest);
1230 per_branch(s, false);
1231 return DISAS_PC_UPDATED;
1235 static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
1236 bool is_imm, int imm, TCGv_i64 cdest)
1238 DisasJumpType ret;
1239 uint64_t dest = s->base.pc_next + 2 * imm;
1240 TCGLabel *lab;
1242 /* Take care of the special cases first. */
1243 if (c->cond == TCG_COND_NEVER) {
1244 ret = DISAS_NEXT;
1245 goto egress;
1247 if (is_imm) {
1248 if (dest == s->pc_tmp) {
1249 /* Branch to next. */
1250 per_branch(s, true);
1251 ret = DISAS_NEXT;
1252 goto egress;
1254 if (c->cond == TCG_COND_ALWAYS) {
1255 ret = help_goto_direct(s, dest);
1256 goto egress;
1258 } else {
1259 if (!cdest) {
1260 /* E.g. bcr %r0 -> no branch. */
1261 ret = DISAS_NEXT;
1262 goto egress;
1264 if (c->cond == TCG_COND_ALWAYS) {
1265 tcg_gen_mov_i64(psw_addr, cdest);
1266 per_branch(s, false);
1267 ret = DISAS_PC_UPDATED;
1268 goto egress;
1272 if (use_goto_tb(s, s->pc_tmp)) {
1273 if (is_imm && use_goto_tb(s, dest)) {
1274 /* Both exits can use goto_tb. */
1275 update_cc_op(s);
1277 lab = gen_new_label();
1278 if (c->is_64) {
1279 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1280 } else {
1281 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1284 /* Branch not taken. */
1285 tcg_gen_goto_tb(0);
1286 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1287 tcg_gen_exit_tb(s->base.tb, 0);
1289 /* Branch taken. */
1290 gen_set_label(lab);
1291 per_breaking_event(s);
1292 tcg_gen_goto_tb(1);
1293 tcg_gen_movi_i64(psw_addr, dest);
1294 tcg_gen_exit_tb(s->base.tb, 1);
1296 ret = DISAS_GOTO_TB;
1297 } else {
1298 /* Fallthru can use goto_tb, but taken branch cannot. */
1299 /* Store taken branch destination before the brcond. This
1300 avoids having to allocate a new local temp to hold it.
1301 We'll overwrite this in the not taken case anyway. */
1302 if (!is_imm) {
1303 tcg_gen_mov_i64(psw_addr, cdest);
1306 lab = gen_new_label();
1307 if (c->is_64) {
1308 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1309 } else {
1310 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1313 /* Branch not taken. */
1314 update_cc_op(s);
1315 tcg_gen_goto_tb(0);
1316 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1317 tcg_gen_exit_tb(s->base.tb, 0);
1319 gen_set_label(lab);
1320 if (is_imm) {
1321 tcg_gen_movi_i64(psw_addr, dest);
1323 per_breaking_event(s);
1324 ret = DISAS_PC_UPDATED;
1326 } else {
1327 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1328 Most commonly we're single-stepping or some other condition that
1329 disables all use of goto_tb. Just update the PC and exit. */
1331 TCGv_i64 next = tcg_const_i64(s->pc_tmp);
1332 if (is_imm) {
1333 cdest = tcg_const_i64(dest);
1336 if (c->is_64) {
1337 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1338 cdest, next);
1339 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1340 } else {
1341 TCGv_i32 t0 = tcg_temp_new_i32();
1342 TCGv_i64 t1 = tcg_temp_new_i64();
1343 TCGv_i64 z = tcg_const_i64(0);
1344 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1345 tcg_gen_extu_i32_i64(t1, t0);
1346 tcg_temp_free_i32(t0);
1347 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1348 per_branch_cond(s, TCG_COND_NE, t1, z);
1349 tcg_temp_free_i64(t1);
1350 tcg_temp_free_i64(z);
1353 if (is_imm) {
1354 tcg_temp_free_i64(cdest);
1356 tcg_temp_free_i64(next);
1358 ret = DISAS_PC_UPDATED;
1361 egress:
1362 free_compare(c);
1363 return ret;
1366 /* ====================================================================== */
1367 /* The operations. These perform the bulk of the work for any insn,
1368 usually after the operands have been loaded and output initialized. */
1370 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1372 TCGv_i64 z, n;
1373 z = tcg_const_i64(0);
1374 n = tcg_temp_new_i64();
1375 tcg_gen_neg_i64(n, o->in2);
1376 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1377 tcg_temp_free_i64(n);
1378 tcg_temp_free_i64(z);
1379 return DISAS_NEXT;
1382 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1384 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1385 return DISAS_NEXT;
1388 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1390 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1391 return DISAS_NEXT;
1394 static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
1396 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1397 tcg_gen_mov_i64(o->out2, o->in2);
1398 return DISAS_NEXT;
1401 static DisasJumpType op_add(DisasContext *s, DisasOps *o)
1403 tcg_gen_add_i64(o->out, o->in1, o->in2);
1404 return DISAS_NEXT;
1407 static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
1409 DisasCompare cmp;
1410 TCGv_i64 carry;
1412 tcg_gen_add_i64(o->out, o->in1, o->in2);
1414 /* The carry flag is the msb of CC, therefore the branch mask that would
1415 create that comparison is 3. Feeding the generated comparison to
1416 setcond produces the carry flag that we desire. */
1417 disas_jcc(s, &cmp, 3);
1418 carry = tcg_temp_new_i64();
1419 if (cmp.is_64) {
1420 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1421 } else {
1422 TCGv_i32 t = tcg_temp_new_i32();
1423 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1424 tcg_gen_extu_i32_i64(carry, t);
1425 tcg_temp_free_i32(t);
1427 free_compare(&cmp);
1429 tcg_gen_add_i64(o->out, o->out, carry);
1430 tcg_temp_free_i64(carry);
1431 return DISAS_NEXT;
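/*
 * Example: if the previous insn left CC_OP_ADDU_64, the mask 3 (= 2 | 1)
 * lookup in disas_jcc resolves to TCG_COND_LTU on (vr, src), i.e.
 * "carry occurred", which setcond then materializes as a 0/1 value to
 * add back into the sum.
 */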
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory.  */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
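/*
 * In 24-bit mode this assembles the classic BAL link word: the ILC in
 * bits 31-30, the CC in bits 29-28, the program mask in bits 27-24 and
 * the return address in bits 23-0, leaving the register's upper half
 * intact.
 */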
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}

static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
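/*
 * Thus COMPARE AND SWAP ends with CC=0 when memory matched the expected
 * value (the swap was performed) and CC=1 when it did not, which is why
 * the setcond above uses TCG_COND_NE directly as the CC.
 */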
2086 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2088 int r1 = get_field(s->fields, r1);
2089 int r3 = get_field(s->fields, r3);
2090 int d2 = get_field(s->fields, d2);
2091 int b2 = get_field(s->fields, b2);
2092 DisasJumpType ret = DISAS_NEXT;
2093 TCGv_i64 addr;
2094 TCGv_i32 t_r1, t_r3;
2096 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2097 addr = get_address(s, 0, b2, d2);
2098 t_r1 = tcg_const_i32(r1);
2099 t_r3 = tcg_const_i32(r3);
2100 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2101 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2102 } else if (HAVE_CMPXCHG128) {
2103 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2104 } else {
2105 gen_helper_exit_atomic(cpu_env);
2106 ret = DISAS_NORETURN;
2108 tcg_temp_free_i64(addr);
2109 tcg_temp_free_i32(t_r1);
2110 tcg_temp_free_i32(t_r3);
2112 set_cc_static(s);
2113 return ret;
2116 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2118 int r3 = get_field(s->fields, r3);
2119 TCGv_i32 t_r3 = tcg_const_i32(r3);
2121 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2122 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2123 } else {
2124 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2126 tcg_temp_free_i32(t_r3);
2128 set_cc_static(s);
2129 return DISAS_NEXT;
2132 #ifndef CONFIG_USER_ONLY
2133 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2135 TCGMemOp mop = s->insn->data;
2136 TCGv_i64 addr, old, cc;
2137 TCGLabel *lab = gen_new_label();
2139 /* Note that in1 = R1 (zero-extended expected value),
2140 out = R1 (original reg), out2 = R1+1 (new value). */
2142 addr = tcg_temp_new_i64();
2143 old = tcg_temp_new_i64();
2144 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2145 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2146 get_mem_index(s), mop | MO_ALIGN);
2147 tcg_temp_free_i64(addr);
2149 /* Are the memory and expected values (un)equal? */
2150 cc = tcg_temp_new_i64();
2151 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2152 tcg_gen_extrl_i64_i32(cc_op, cc);
2154 /* Write back the output now, so that it happens before the
2155 following branch and we don't need local temps. */
2156 if ((mop & MO_SIZE) == MO_32) {
2157 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2158 } else {
2159 tcg_gen_mov_i64(o->out, old);
2161 tcg_temp_free_i64(old);
2163 /* If the comparison was equal, and the LSB of R2 was set,
2164 then we need to flush the TLB (for all cpus). */
2165 tcg_gen_xori_i64(cc, cc, 1);
2166 tcg_gen_and_i64(cc, cc, o->in2);
2167 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2168 tcg_temp_free_i64(cc);
2170 gen_helper_purge(cpu_env);
2171 gen_set_label(lab);
2173 return DISAS_NEXT;
2175 #endif
2177 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2179 TCGv_i64 t1 = tcg_temp_new_i64();
2180 TCGv_i32 t2 = tcg_temp_new_i32();
2181 tcg_gen_extrl_i64_i32(t2, o->in1);
2182 gen_helper_cvd(t1, t2);
2183 tcg_temp_free_i32(t2);
2184 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2185 tcg_temp_free_i64(t1);
2186 return DISAS_NEXT;
2189 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2191 int m3 = get_field(s->fields, m3);
2192 TCGLabel *lab = gen_new_label();
2193 TCGCond c;
2195 c = tcg_invert_cond(ltgt_cond[m3]);
2196 if (s->insn->data) {
2197 c = tcg_unsigned_cond(c);
2199 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2201 /* Trap. */
2202 gen_trap(s);
2204 gen_set_label(lab);
2205 return DISAS_NEXT;
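/* COMPARE AND TRAP: ltgt_cond[m3] maps the M3 mask to a comparison,
   insn->data selects the logical (unsigned) forms, and we branch
   around the trap on the inverted condition, so the trap is raised
   exactly when the mask condition holds. */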
2208 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2210 int m3 = get_field(s->fields, m3);
2211 int r1 = get_field(s->fields, r1);
2212 int r2 = get_field(s->fields, r2);
2213 TCGv_i32 tr1, tr2, chk;
2215 /* R1 and R2 must both be even. */
2216 if ((r1 | r2) & 1) {
2217 gen_program_exception(s, PGM_SPECIFICATION);
2218 return DISAS_NORETURN;
2220 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2221 m3 = 0;
2224 tr1 = tcg_const_i32(r1);
2225 tr2 = tcg_const_i32(r2);
2226 chk = tcg_const_i32(m3);
2228 switch (s->insn->data) {
2229 case 12:
2230 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2231 break;
2232 case 14:
2233 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2234 break;
2235 case 21:
2236 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2237 break;
2238 case 24:
2239 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2240 break;
2241 case 41:
2242 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2243 break;
2244 case 42:
2245 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2246 break;
2247 default:
2248 g_assert_not_reached();
2251 tcg_temp_free_i32(tr1);
2252 tcg_temp_free_i32(tr2);
2253 tcg_temp_free_i32(chk);
2254 set_cc_static(s);
2255 return DISAS_NEXT;
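/* The digits in insn->data name the conversion pair: 1 = UTF-8,
   2 = UTF-16, 4 = UTF-32; e.g. cu21 converts UTF-16 to UTF-8. M3
   requests well-formedness checking, which exists only with the
   ETF3-enhancement facility, hence the forced zero above. */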
2258 #ifndef CONFIG_USER_ONLY
2259 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2261 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2262 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2263 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2265 gen_helper_diag(cpu_env, r1, r3, func_code);
2267 tcg_temp_free_i32(func_code);
2268 tcg_temp_free_i32(r3);
2269 tcg_temp_free_i32(r1);
2270 return DISAS_NEXT;
2272 #endif
2274 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2276 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2277 return_low128(o->out);
2278 return DISAS_NEXT;
2281 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2283 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2284 return_low128(o->out);
2285 return DISAS_NEXT;
2288 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2290 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2291 return_low128(o->out);
2292 return DISAS_NEXT;
2295 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2297 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2298 return_low128(o->out);
2299 return DISAS_NEXT;
2302 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2304 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2305 return DISAS_NEXT;
2308 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2310 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2311 return DISAS_NEXT;
2314 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2316 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2317 return_low128(o->out2);
2318 return DISAS_NEXT;
2321 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2323 int r2 = get_field(s->fields, r2);
2324 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2325 return DISAS_NEXT;
2328 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2330 /* No cache information provided. */
2331 tcg_gen_movi_i64(o->out, -1);
2332 return DISAS_NEXT;
2335 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2337 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2338 return DISAS_NEXT;
2341 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2343 int r1 = get_field(s->fields, r1);
2344 int r2 = get_field(s->fields, r2);
2345 TCGv_i64 t = tcg_temp_new_i64();
2347 /* Note the "subsequently" in the PoO, which implies a defined result
2348 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2349 tcg_gen_shri_i64(t, psw_mask, 32);
2350 store_reg32_i64(r1, t);
2351 if (r2 != 0) {
2352 store_reg32_i64(r2, psw_mask);
2355 tcg_temp_free_i64(t);
2356 return DISAS_NEXT;
2359 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2361 int r1 = get_field(s->fields, r1);
2362 TCGv_i32 ilen;
2363 TCGv_i64 v1;
2365 /* Nested EXECUTE is not allowed. */
2366 if (unlikely(s->ex_value)) {
2367 gen_program_exception(s, PGM_EXECUTE);
2368 return DISAS_NORETURN;
2371 update_psw_addr(s);
2372 update_cc_op(s);
2374 if (r1 == 0) {
2375 v1 = tcg_const_i64(0);
2376 } else {
2377 v1 = regs[r1];
2380 ilen = tcg_const_i32(s->ilen);
2381 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2382 tcg_temp_free_i32(ilen);
2384 if (r1 == 0) {
2385 tcg_temp_free_i64(v1);
2388 return DISAS_PC_CC_UPDATED;
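/* EXECUTE: the helper fetches the target insn, ORs bits 56-63 of R1
   into its second byte, and records it in ex_value; ending the TB with
   DISAS_PC_CC_UPDATED makes the translator pick it up on the next
   pass. The ex_value check above is what forbids nesting. */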
2391 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2393 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2394 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2395 tcg_temp_free_i32(m3);
2396 return DISAS_NEXT;
2399 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2401 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2402 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2403 tcg_temp_free_i32(m3);
2404 return DISAS_NEXT;
2407 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2409 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2410 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2411 return_low128(o->out2);
2412 tcg_temp_free_i32(m3);
2413 return DISAS_NEXT;
2416 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2418 /* We'll use the original input for cc computation, since we get to
2419 compare that against 0, which ought to be better than comparing
2420 the real output against 64. It also lets cc_dst be a convenient
2421 temporary during our computation. */
2422 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2424 /* R1 = IN ? CLZ(IN) : 64. */
2425 tcg_gen_clzi_i64(o->out, o->in2, 64);
2427 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2428 value by 64, which is undefined. But since the shift is 64 iff the
2429 input is zero, we still get the correct result after and'ing. */
2430 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2431 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2432 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2433 return DISAS_NEXT;
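/* Worked example: IN = 0x0040000000001000ull has its leftmost one at
   bit position 9 (PoO numbering), so R1 = 9 and
   R1+1 = IN & ~0x0040000000000000ull = 0x0000000000001000ull. */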
2436 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2438 int m3 = get_field(s->fields, m3);
2439 int pos, len, base = s->insn->data;
2440 TCGv_i64 tmp = tcg_temp_new_i64();
2441 uint64_t ccm;
2443 switch (m3) {
2444 case 0xf:
2445 /* Effectively a 32-bit load. */
2446 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2447 len = 32;
2448 goto one_insert;
2450 case 0xc:
2451 case 0x6:
2452 case 0x3:
2453 /* Effectively a 16-bit load. */
2454 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2455 len = 16;
2456 goto one_insert;
2458 case 0x8:
2459 case 0x4:
2460 case 0x2:
2461 case 0x1:
2462 /* Effectively an 8-bit load. */
2463 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2464 len = 8;
2465 goto one_insert;
2467 one_insert:
2468 pos = base + ctz32(m3) * 8;
2469 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2470 ccm = ((1ull << len) - 1) << pos;
2471 break;
2473 default:
2474 /* This is going to be a sequence of loads and inserts. */
2475 pos = base + 32 - 8;
2476 ccm = 0;
2477 while (m3) {
2478 if (m3 & 0x8) {
2479 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2480 tcg_gen_addi_i64(o->in2, o->in2, 1);
2481 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2482 ccm |= 0xffull << pos; /* pos can reach 56 for ICMH */
2484 m3 = (m3 << 1) & 0xf;
2485 pos -= 8;
2487 break;
2490 tcg_gen_movi_i64(tmp, ccm);
2491 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2492 tcg_temp_free_i64(tmp);
2493 return DISAS_NEXT;
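/* Example: M3 = 0b1010 takes the multi-byte path, loading two bytes
   from consecutive addresses into byte positions 0 and 2 (big-endian)
   of the 32-bit field; CCM then covers exactly those 16 bits for the
   CC computation. */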
2496 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2498 int shift = s->insn->data & 0xff;
2499 int size = s->insn->data >> 8;
2500 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2501 return DISAS_NEXT;
2504 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2506 TCGv_i64 t1, t2;
2508 gen_op_calc_cc(s);
2509 t1 = tcg_temp_new_i64();
2510 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2511 t2 = tcg_temp_new_i64();
2512 tcg_gen_extu_i32_i64(t2, cc_op);
2513 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2514 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2515 tcg_temp_free_i64(t1);
2516 tcg_temp_free_i64(t2);
2517 return DISAS_NEXT;
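/* In PoO numbering this leaves bits 32-33 of R1 zero, places the CC
   in bits 34-35 and the program mask in bits 36-39; bits 0-31 and
   40-63 of R1 are untouched. */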
2520 #ifndef CONFIG_USER_ONLY
2521 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2523 TCGv_i32 m4;
2525 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2526 m4 = tcg_const_i32(get_field(s->fields, m4));
2527 } else {
2528 m4 = tcg_const_i32(0);
2530 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2531 tcg_temp_free_i32(m4);
2532 return DISAS_NEXT;
2535 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2537 TCGv_i32 m4;
2539 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2540 m4 = tcg_const_i32(get_field(s->fields, m4));
2541 } else {
2542 m4 = tcg_const_i32(0);
2544 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2545 tcg_temp_free_i32(m4);
2546 return DISAS_NEXT;
2549 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2551 gen_helper_iske(o->out, cpu_env, o->in2);
2552 return DISAS_NEXT;
2554 #endif
2556 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2558 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2559 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2560 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2561 TCGv_i32 t_r1, t_r2, t_r3, type;
2563 switch (s->insn->data) {
2564 case S390_FEAT_TYPE_KMCTR:
2565 if (r3 & 1 || !r3) {
2566 gen_program_exception(s, PGM_SPECIFICATION);
2567 return DISAS_NORETURN;
2569 /* FALL THROUGH */
2570 case S390_FEAT_TYPE_PPNO:
2571 case S390_FEAT_TYPE_KMF:
2572 case S390_FEAT_TYPE_KMC:
2573 case S390_FEAT_TYPE_KMO:
2574 case S390_FEAT_TYPE_KM:
2575 if (r1 & 1 || !r1) {
2576 gen_program_exception(s, PGM_SPECIFICATION);
2577 return DISAS_NORETURN;
2579 /* FALL THROUGH */
2580 case S390_FEAT_TYPE_KMAC:
2581 case S390_FEAT_TYPE_KIMD:
2582 case S390_FEAT_TYPE_KLMD:
2583 if (r2 & 1 || !r2) {
2584 gen_program_exception(s, PGM_SPECIFICATION);
2585 return DISAS_NORETURN;
2587 /* FALL THROUGH */
2588 case S390_FEAT_TYPE_PCKMO:
2589 case S390_FEAT_TYPE_PCC:
2590 break;
2591 default:
2592 g_assert_not_reached();
2595 t_r1 = tcg_const_i32(r1);
2596 t_r2 = tcg_const_i32(r2);
2597 t_r3 = tcg_const_i32(r3);
2598 type = tcg_const_i32(s->insn->data);
2599 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2600 set_cc_static(s);
2601 tcg_temp_free_i32(t_r1);
2602 tcg_temp_free_i32(t_r2);
2603 tcg_temp_free_i32(t_r3);
2604 tcg_temp_free_i32(type);
2605 return DISAS_NEXT;
2608 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2610 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2611 set_cc_static(s);
2612 return DISAS_NEXT;
2615 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2617 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2618 set_cc_static(s);
2619 return DISAS_NEXT;
2622 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2624 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2625 set_cc_static(s);
2626 return DISAS_NEXT;
2629 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2631 /* The real output is indeed the original value in memory;
2632 recompute the addition for the computation of CC. */
2633 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2634 s->insn->data | MO_ALIGN);
2635 /* However, we need to recompute the addition for setting CC. */
2636 tcg_gen_add_i64(o->out, o->in1, o->in2);
2637 return DISAS_NEXT;
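/* The same pattern repeats for LOAD AND {AND,OR,XOR} below: the
   atomic fetch-op hands back the old memory value (the architected
   result for R1), while the CC comes from recomputing the interlocked
   result separately. */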
2640 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2642 /* The real output is indeed the original value in memory;
2643 recompute the operation for the computation of CC. */
2644 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2645 s->insn->data | MO_ALIGN);
2646 /* However, we need to recompute the operation for setting CC. */
2647 tcg_gen_and_i64(o->out, o->in1, o->in2);
2648 return DISAS_NEXT;
2651 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2653 /* The real output is indeed the original value in memory;
2655 recompute the operation for the computation of CC. */
2655 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2656 s->insn->data | MO_ALIGN);
2657 /* However, we need to recompute the operation for setting CC. */
2658 tcg_gen_or_i64(o->out, o->in1, o->in2);
2659 return DISAS_NEXT;
2662 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2664 /* The real output is indeed the original value in memory;
2665 recompute the operation for the computation of CC. */
2666 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2667 s->insn->data | MO_ALIGN);
2668 /* However, we need to recompute the operation for setting CC. */
2669 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2670 return DISAS_NEXT;
2673 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2675 gen_helper_ldeb(o->out, cpu_env, o->in2);
2676 return DISAS_NEXT;
2679 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2681 gen_helper_ledb(o->out, cpu_env, o->in2);
2682 return DISAS_NEXT;
2685 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2687 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2688 return DISAS_NEXT;
2691 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2693 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2694 return DISAS_NEXT;
2697 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2699 gen_helper_lxdb(o->out, cpu_env, o->in2);
2700 return_low128(o->out2);
2701 return DISAS_NEXT;
2704 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2706 gen_helper_lxeb(o->out, cpu_env, o->in2);
2707 return_low128(o->out2);
2708 return DISAS_NEXT;
2711 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2713 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2714 return DISAS_NEXT;
2717 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2719 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2720 return DISAS_NEXT;
2723 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2725 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2726 return DISAS_NEXT;
2729 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2731 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2732 return DISAS_NEXT;
2735 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2737 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2738 return DISAS_NEXT;
2741 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2743 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2744 return DISAS_NEXT;
2747 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2749 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2750 return DISAS_NEXT;
2753 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2755 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2756 return DISAS_NEXT;
2759 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2761 TCGLabel *lab = gen_new_label();
2762 store_reg32_i64(get_field(s->fields, r1), o->in2);
2763 /* The value is stored even in case of trap. */
2764 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2765 gen_trap(s);
2766 gen_set_label(lab);
2767 return DISAS_NEXT;
2770 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2772 TCGLabel *lab = gen_new_label();
2773 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2774 /* The value is stored even in case of trap. */
2775 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2776 gen_trap(s);
2777 gen_set_label(lab);
2778 return DISAS_NEXT;
2781 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2783 TCGLabel *lab = gen_new_label();
2784 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2785 /* The value is stored even in case of trap. */
2786 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2787 gen_trap(s);
2788 gen_set_label(lab);
2789 return DISAS_NEXT;
2792 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2794 TCGLabel *lab = gen_new_label();
2795 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2796 /* The value is stored even in case of trap. */
2797 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2798 gen_trap(s);
2799 gen_set_label(lab);
2800 return DISAS_NEXT;
2803 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2805 TCGLabel *lab = gen_new_label();
2806 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2807 /* The value is stored even in case of trap. */
2808 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2809 gen_trap(s);
2810 gen_set_label(lab);
2811 return DISAS_NEXT;
2814 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2816 DisasCompare c;
2818 disas_jcc(s, &c, get_field(s->fields, m3));
2820 if (c.is_64) {
2821 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2822 o->in2, o->in1);
2823 free_compare(&c);
2824 } else {
2825 TCGv_i32 t32 = tcg_temp_new_i32();
2826 TCGv_i64 t, z;
2828 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2829 free_compare(&c);
2831 t = tcg_temp_new_i64();
2832 tcg_gen_extu_i32_i64(t, t32);
2833 tcg_temp_free_i32(t32);
2835 z = tcg_const_i64(0);
2836 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2837 tcg_temp_free_i64(t);
2838 tcg_temp_free_i64(z);
2841 return DISAS_NEXT;
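/* LOAD ON CONDITION: movcond picks o->in2 (the new value) when the
   condition holds and o->in1 (the register's old value) otherwise,
   so on the false path the register is rewritten with itself. */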
2844 #ifndef CONFIG_USER_ONLY
2845 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2847 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2848 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2849 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2850 tcg_temp_free_i32(r1);
2851 tcg_temp_free_i32(r3);
2852 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2853 return DISAS_PC_STALE_NOCHAIN;
2856 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2858 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2859 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2860 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2861 tcg_temp_free_i32(r1);
2862 tcg_temp_free_i32(r3);
2863 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2864 return DISAS_PC_STALE_NOCHAIN;
2867 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2869 gen_helper_lra(o->out, cpu_env, o->in2);
2870 set_cc_static(s);
2871 return DISAS_NEXT;
2874 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2876 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2877 return DISAS_NEXT;
2880 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2882 TCGv_i64 t1, t2;
2884 per_breaking_event(s);
2886 t1 = tcg_temp_new_i64();
2887 t2 = tcg_temp_new_i64();
2888 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2889 MO_TEUL | MO_ALIGN_8);
2890 tcg_gen_addi_i64(o->in2, o->in2, 4);
2891 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2892 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2893 tcg_gen_shli_i64(t1, t1, 32);
2894 gen_helper_load_psw(cpu_env, t1, t2);
2895 tcg_temp_free_i64(t1);
2896 tcg_temp_free_i64(t2);
2897 return DISAS_NORETURN;
2900 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2902 TCGv_i64 t1, t2;
2904 per_breaking_event(s);
2906 t1 = tcg_temp_new_i64();
2907 t2 = tcg_temp_new_i64();
2908 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2909 MO_TEQ | MO_ALIGN_8);
2910 tcg_gen_addi_i64(o->in2, o->in2, 8);
2911 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2912 gen_helper_load_psw(cpu_env, t1, t2);
2913 tcg_temp_free_i64(t1);
2914 tcg_temp_free_i64(t2);
2915 return DISAS_NORETURN;
2917 #endif
2919 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2921 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2922 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2923 gen_helper_lam(cpu_env, r1, o->in2, r3);
2924 tcg_temp_free_i32(r1);
2925 tcg_temp_free_i32(r3);
2926 return DISAS_NEXT;
2929 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2931 int r1 = get_field(s->fields, r1);
2932 int r3 = get_field(s->fields, r3);
2933 TCGv_i64 t1, t2;
2935 /* Only one register to read. */
2936 t1 = tcg_temp_new_i64();
2937 if (unlikely(r1 == r3)) {
2938 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2939 store_reg32_i64(r1, t1);
2940 tcg_temp_free(t1);
2941 return DISAS_NEXT;
2944 /* First load the values of the first and last registers to trigger
2945 possible page faults. */
2946 t2 = tcg_temp_new_i64();
2947 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2948 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2949 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2950 store_reg32_i64(r1, t1);
2951 store_reg32_i64(r3, t2);
2953 /* Only two registers to read. */
2954 if (((r1 + 1) & 15) == r3) {
2955 tcg_temp_free(t2);
2956 tcg_temp_free(t1);
2957 return DISAS_NEXT;
2960 /* Then load the remaining registers. Page fault can't occur. */
2961 r3 = (r3 - 1) & 15;
2962 tcg_gen_movi_i64(t2, 4);
2963 while (r1 != r3) {
2964 r1 = (r1 + 1) & 15;
2965 tcg_gen_add_i64(o->in2, o->in2, t2);
2966 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2967 store_reg32_i64(r1, t1);
2969 tcg_temp_free(t2);
2970 tcg_temp_free(t1);
2972 return DISAS_NEXT;
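/* Touching the first and last words before any register is written
   keeps LOAD MULTIPLE restartable: a page fault can only arrive before
   any state has changed, and the middle loads then cannot fault.
   op_lmh and op_lm64 below use the same structure. */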
2975 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
2977 int r1 = get_field(s->fields, r1);
2978 int r3 = get_field(s->fields, r3);
2979 TCGv_i64 t1, t2;
2981 /* Only one register to read. */
2982 t1 = tcg_temp_new_i64();
2983 if (unlikely(r1 == r3)) {
2984 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2985 store_reg32h_i64(r1, t1);
2986 tcg_temp_free(t1);
2987 return DISAS_NEXT;
2990 /* First load the values of the first and last registers to trigger
2991 possible page faults. */
2992 t2 = tcg_temp_new_i64();
2993 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2994 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2995 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2996 store_reg32h_i64(r1, t1);
2997 store_reg32h_i64(r3, t2);
2999 /* Only two registers to read. */
3000 if (((r1 + 1) & 15) == r3) {
3001 tcg_temp_free(t2);
3002 tcg_temp_free(t1);
3003 return DISAS_NEXT;
3006 /* Then load the remaining registers. Page fault can't occur. */
3007 r3 = (r3 - 1) & 15;
3008 tcg_gen_movi_i64(t2, 4);
3009 while (r1 != r3) {
3010 r1 = (r1 + 1) & 15;
3011 tcg_gen_add_i64(o->in2, o->in2, t2);
3012 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3013 store_reg32h_i64(r1, t1);
3015 tcg_temp_free(t2);
3016 tcg_temp_free(t1);
3018 return DISAS_NEXT;
3021 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3023 int r1 = get_field(s->fields, r1);
3024 int r3 = get_field(s->fields, r3);
3025 TCGv_i64 t1, t2;
3027 /* Only one register to read. */
3028 if (unlikely(r1 == r3)) {
3029 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3030 return DISAS_NEXT;
3033 /* First load the values of the first and last registers to trigger
3034 possible page faults. */
3035 t1 = tcg_temp_new_i64();
3036 t2 = tcg_temp_new_i64();
3037 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3038 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3039 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3040 tcg_gen_mov_i64(regs[r1], t1);
3041 tcg_temp_free(t2);
3043 /* Only two registers to read. */
3044 if (((r1 + 1) & 15) == r3) {
3045 tcg_temp_free(t1);
3046 return DISAS_NEXT;
3049 /* Then load the remaining registers. Page fault can't occur. */
3050 r3 = (r3 - 1) & 15;
3051 tcg_gen_movi_i64(t1, 8);
3052 while (r1 != r3) {
3053 r1 = (r1 + 1) & 15;
3054 tcg_gen_add_i64(o->in2, o->in2, t1);
3055 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3057 tcg_temp_free(t1);
3059 return DISAS_NEXT;
3062 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3064 TCGv_i64 a1, a2;
3065 TCGMemOp mop = s->insn->data;
3067 /* In a parallel context, stop the world and single step. */
3068 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3069 update_psw_addr(s);
3070 update_cc_op(s);
3071 gen_exception(EXCP_ATOMIC);
3072 return DISAS_NORETURN;
3075 /* In a serial context, perform the two loads ... */
3076 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3077 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3078 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3079 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3080 tcg_temp_free_i64(a1);
3081 tcg_temp_free_i64(a2);
3083 /* ... and indicate that we performed them while interlocked. */
3084 gen_op_movi_cc(s, 0);
3085 return DISAS_NEXT;
3088 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3090 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3091 gen_helper_lpq(o->out, cpu_env, o->in2);
3092 } else if (HAVE_ATOMIC128) {
3093 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3094 } else {
3095 gen_helper_exit_atomic(cpu_env);
3096 return DISAS_NORETURN;
3098 return_low128(o->out2);
3099 return DISAS_NEXT;
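/* LPQ requires a single 16-byte atomic load. As with CDSG above:
   serial helper outside of a parallel context, host 128-bit atomics
   when available, otherwise the EXCP_ATOMIC slow path. */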
3102 #ifndef CONFIG_USER_ONLY
3103 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3105 gen_helper_lura(o->out, cpu_env, o->in2);
3106 return DISAS_NEXT;
3109 static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3111 gen_helper_lurag(o->out, cpu_env, o->in2);
3112 return DISAS_NEXT;
3114 #endif
3116 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3118 tcg_gen_andi_i64(o->out, o->in2, -256);
3119 return DISAS_NEXT;
3122 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3124 o->out = o->in2;
3125 o->g_out = o->g_in2;
3126 o->in2 = NULL;
3127 o->g_in2 = false;
3128 return DISAS_NEXT;
3131 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3133 int b2 = get_field(s->fields, b2);
3134 TCGv ar1 = tcg_temp_new_i64();
3136 o->out = o->in2;
3137 o->g_out = o->g_in2;
3138 o->in2 = NULL;
3139 o->g_in2 = false;
3141 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3142 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3143 tcg_gen_movi_i64(ar1, 0);
3144 break;
3145 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3146 tcg_gen_movi_i64(ar1, 1);
3147 break;
3148 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3149 if (b2) {
3150 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3151 } else {
3152 tcg_gen_movi_i64(ar1, 0);
3154 break;
3155 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3156 tcg_gen_movi_i64(ar1, 2);
3157 break;
3160 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3161 tcg_temp_free_i64(ar1);
3163 return DISAS_NEXT;
3166 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3168 o->out = o->in1;
3169 o->out2 = o->in2;
3170 o->g_out = o->g_in1;
3171 o->g_out2 = o->g_in2;
3172 o->in1 = NULL;
3173 o->in2 = NULL;
3174 o->g_in1 = o->g_in2 = false;
3175 return DISAS_NEXT;
3178 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3180 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3181 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3182 tcg_temp_free_i32(l);
3183 return DISAS_NEXT;
3186 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3188 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3189 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3190 tcg_temp_free_i32(l);
3191 return DISAS_NEXT;
3194 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3196 int r1 = get_field(s->fields, r1);
3197 int r2 = get_field(s->fields, r2);
3198 TCGv_i32 t1, t2;
3200 /* r1 and r2 must be even. */
3201 if (r1 & 1 || r2 & 1) {
3202 gen_program_exception(s, PGM_SPECIFICATION);
3203 return DISAS_NORETURN;
3206 t1 = tcg_const_i32(r1);
3207 t2 = tcg_const_i32(r2);
3208 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3209 tcg_temp_free_i32(t1);
3210 tcg_temp_free_i32(t2);
3211 set_cc_static(s);
3212 return DISAS_NEXT;
3215 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3217 int r1 = get_field(s->fields, r1);
3218 int r3 = get_field(s->fields, r3);
3219 TCGv_i32 t1, t3;
3221 /* r1 and r3 must be even. */
3222 if (r1 & 1 || r3 & 1) {
3223 gen_program_exception(s, PGM_SPECIFICATION);
3224 return DISAS_NORETURN;
3227 t1 = tcg_const_i32(r1);
3228 t3 = tcg_const_i32(r3);
3229 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3230 tcg_temp_free_i32(t1);
3231 tcg_temp_free_i32(t3);
3232 set_cc_static(s);
3233 return DISAS_NEXT;
3236 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3238 int r1 = get_field(s->fields, r1);
3239 int r3 = get_field(s->fields, r3);
3240 TCGv_i32 t1, t3;
3242 /* r1 and r3 must be even. */
3243 if (r1 & 1 || r3 & 1) {
3244 gen_program_exception(s, PGM_SPECIFICATION);
3245 return DISAS_NORETURN;
3248 t1 = tcg_const_i32(r1);
3249 t3 = tcg_const_i32(r3);
3250 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3251 tcg_temp_free_i32(t1);
3252 tcg_temp_free_i32(t3);
3253 set_cc_static(s);
3254 return DISAS_NEXT;
3257 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3259 int r3 = get_field(s->fields, r3);
3260 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3261 set_cc_static(s);
3262 return DISAS_NEXT;
3265 #ifndef CONFIG_USER_ONLY
3266 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3268 int r1 = get_field(s->fields, l1);
3269 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3270 set_cc_static(s);
3271 return DISAS_NEXT;
3274 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3276 int r1 = get_field(s->fields, l1);
3277 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3278 set_cc_static(s);
3279 return DISAS_NEXT;
3281 #endif
3283 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3285 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3286 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3287 tcg_temp_free_i32(l);
3288 return DISAS_NEXT;
3291 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3293 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3294 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3295 tcg_temp_free_i32(l);
3296 return DISAS_NEXT;
3299 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3301 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3302 set_cc_static(s);
3303 return DISAS_NEXT;
3306 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3308 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3309 set_cc_static(s);
3310 return_low128(o->in2);
3311 return DISAS_NEXT;
3314 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3316 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3317 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3318 tcg_temp_free_i32(l);
3319 return DISAS_NEXT;
3322 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3324 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3325 return DISAS_NEXT;
3328 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3330 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3331 return DISAS_NEXT;
3334 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3336 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3337 return DISAS_NEXT;
3340 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3342 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3343 return DISAS_NEXT;
3346 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3348 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3349 return DISAS_NEXT;
3352 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3354 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3355 return_low128(o->out2);
3356 return DISAS_NEXT;
3359 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3361 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3362 return_low128(o->out2);
3363 return DISAS_NEXT;
3366 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3368 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3369 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3370 tcg_temp_free_i64(r3);
3371 return DISAS_NEXT;
3374 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3376 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3377 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3378 tcg_temp_free_i64(r3);
3379 return DISAS_NEXT;
3382 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3384 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3385 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3386 tcg_temp_free_i64(r3);
3387 return DISAS_NEXT;
3390 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3392 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3393 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3394 tcg_temp_free_i64(r3);
3395 return DISAS_NEXT;
3398 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3400 TCGv_i64 z, n;
3401 z = tcg_const_i64(0);
3402 n = tcg_temp_new_i64();
3403 tcg_gen_neg_i64(n, o->in2);
3404 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3405 tcg_temp_free_i64(n);
3406 tcg_temp_free_i64(z);
3407 return DISAS_NEXT;
3410 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3412 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3413 return DISAS_NEXT;
3416 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3418 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3419 return DISAS_NEXT;
3422 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3424 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3425 tcg_gen_mov_i64(o->out2, o->in2);
3426 return DISAS_NEXT;
3429 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3431 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3432 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3433 tcg_temp_free_i32(l);
3434 set_cc_static(s);
3435 return DISAS_NEXT;
3438 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3440 tcg_gen_neg_i64(o->out, o->in2);
3441 return DISAS_NEXT;
3444 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3446 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3447 return DISAS_NEXT;
3450 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3452 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3453 return DISAS_NEXT;
3456 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3458 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3459 tcg_gen_mov_i64(o->out2, o->in2);
3460 return DISAS_NEXT;
3463 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3465 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3466 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3467 tcg_temp_free_i32(l);
3468 set_cc_static(s);
3469 return DISAS_NEXT;
3472 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3474 tcg_gen_or_i64(o->out, o->in1, o->in2);
3475 return DISAS_NEXT;
3478 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3480 int shift = s->insn->data & 0xff;
3481 int size = s->insn->data >> 8;
3482 uint64_t mask = ((1ull << size) - 1) << shift;
3484 assert(!o->g_in2);
3485 tcg_gen_shli_i64(o->in2, o->in2, shift);
3486 tcg_gen_or_i64(o->out, o->in1, o->in2);
3488 /* Produce the CC from only the bits manipulated. */
3489 tcg_gen_andi_i64(cc_dst, o->out, mask);
3490 set_cc_nz_u64(s, cc_dst);
3491 return DISAS_NEXT;
3494 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3496 o->in1 = tcg_temp_new_i64();
3498 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3499 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3500 } else {
3501 /* Perform the atomic operation in memory. */
3502 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3503 s->insn->data);
3506 /* Recompute also for atomic case: needed for setting CC. */
3507 tcg_gen_or_i64(o->out, o->in1, o->in2);
3509 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3510 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3512 return DISAS_NEXT;
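/* With the interlocked-access facility the OR is one atomic
   read-modify-write in memory, and the tcg_gen_or_i64 below it only
   recomputes the result for the CC; without the facility that same OR
   produces the value which is then stored back non-atomically. */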
3515 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3517 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3518 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3519 tcg_temp_free_i32(l);
3520 return DISAS_NEXT;
3523 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3525 int l2 = get_field(s->fields, l2) + 1;
3526 TCGv_i32 l;
3528 /* The length must not exceed 32 bytes. */
3529 if (l2 > 32) {
3530 gen_program_exception(s, PGM_SPECIFICATION);
3531 return DISAS_NORETURN;
3533 l = tcg_const_i32(l2);
3534 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3535 tcg_temp_free_i32(l);
3536 return DISAS_NEXT;
3539 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3541 int l2 = get_field(s->fields, l2) + 1;
3542 TCGv_i32 l;
3544 /* The length must be even and must not exceed 64 bytes. */
3545 if ((l2 & 1) || (l2 > 64)) {
3546 gen_program_exception(s, PGM_SPECIFICATION);
3547 return DISAS_NORETURN;
3549 l = tcg_const_i32(l2);
3550 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3551 tcg_temp_free_i32(l);
3552 return DISAS_NEXT;
3555 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3557 gen_helper_popcnt(o->out, o->in2);
3558 return DISAS_NEXT;
3561 #ifndef CONFIG_USER_ONLY
3562 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3564 gen_helper_ptlb(cpu_env);
3565 return DISAS_NEXT;
3567 #endif
3569 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3571 int i3 = get_field(s->fields, i3);
3572 int i4 = get_field(s->fields, i4);
3573 int i5 = get_field(s->fields, i5);
3574 int do_zero = i4 & 0x80;
3575 uint64_t mask, imask, pmask;
3576 int pos, len, rot;
3578 /* Adjust the arguments for the specific insn. */
3579 switch (s->fields->op2) {
3580 case 0x55: /* risbg */
3581 case 0x59: /* risbgn */
3582 i3 &= 63;
3583 i4 &= 63;
3584 pmask = ~0;
3585 break;
3586 case 0x5d: /* risbhg */
3587 i3 &= 31;
3588 i4 &= 31;
3589 pmask = 0xffffffff00000000ull;
3590 break;
3591 case 0x51: /* risblg */
3592 i3 &= 31;
3593 i4 &= 31;
3594 pmask = 0x00000000ffffffffull;
3595 break;
3596 default:
3597 g_assert_not_reached();
3600 /* MASK is the set of bits to be inserted from R2.
3601 Take care for I3/I4 wraparound. */
3602 mask = pmask >> i3;
3603 if (i3 <= i4) {
3604 mask ^= pmask >> i4 >> 1;
3605 } else {
3606 mask |= ~(pmask >> i4 >> 1);
3608 mask &= pmask;
3610 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3611 insns, we need to keep the other half of the register. */
3612 imask = ~mask | ~pmask;
3613 if (do_zero) {
3614 imask = ~pmask;
3617 len = i4 - i3 + 1;
3618 pos = 63 - i4;
3619 rot = i5 & 63;
3620 if (s->fields->op2 == 0x5d) {
3621 pos += 32;
3624 /* In some cases we can implement this with extract. */
3625 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3626 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3627 return DISAS_NEXT;
3630 /* In some cases we can implement this with deposit. */
3631 if (len > 0 && (imask == 0 || ~mask == imask)) {
3632 /* Note that we rotate the bits to be inserted to the lsb, not to
3633 the position as described in the PoO. */
3634 rot = (rot - pos) & 63;
3635 } else {
3636 pos = -1;
3639 /* Rotate the input as necessary. */
3640 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3642 /* Insert the selected bits into the output. */
3643 if (pos >= 0) {
3644 if (imask == 0) {
3645 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3646 } else {
3647 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3649 } else if (imask == 0) {
3650 tcg_gen_andi_i64(o->out, o->in2, mask);
3651 } else {
3652 tcg_gen_andi_i64(o->in2, o->in2, mask);
3653 tcg_gen_andi_i64(o->out, o->out, imask);
3654 tcg_gen_or_i64(o->out, o->out, o->in2);
3656 return DISAS_NEXT;
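/* Example of the deposit path: RISBG with I3 = 48, I4 = 63, I5 = 0
   gives mask = 0xffff, rot = 0, pos = 0, len = 16, so the low 16 bits
   of R2 are deposited into the low 16 bits of R1 while imask keeps
   the remaining bits of R1 intact. */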
3659 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3661 int i3 = get_field(s->fields, i3);
3662 int i4 = get_field(s->fields, i4);
3663 int i5 = get_field(s->fields, i5);
3664 uint64_t mask;
3666 /* If this is a test-only form, arrange to discard the result. */
3667 if (i3 & 0x80) {
3668 o->out = tcg_temp_new_i64();
3669 o->g_out = false;
3672 i3 &= 63;
3673 i4 &= 63;
3674 i5 &= 63;
3676 /* MASK is the set of bits to be operated on from R2.
3677 Take care for I3/I4 wraparound. */
3678 mask = ~0ull >> i3;
3679 if (i3 <= i4) {
3680 mask ^= ~0ull >> i4 >> 1;
3681 } else {
3682 mask |= ~(~0ull >> i4 >> 1);
3685 /* Rotate the input as necessary. */
3686 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3688 /* Operate. */
3689 switch (s->fields->op2) {
3690 case 0x54: /* AND */
3691 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3692 tcg_gen_and_i64(o->out, o->out, o->in2);
3693 break;
3694 case 0x56: /* OR */
3695 tcg_gen_andi_i64(o->in2, o->in2, mask);
3696 tcg_gen_or_i64(o->out, o->out, o->in2);
3697 break;
3698 case 0x57: /* XOR */
3699 tcg_gen_andi_i64(o->in2, o->in2, mask);
3700 tcg_gen_xor_i64(o->out, o->out, o->in2);
3701 break;
3702 default:
3703 abort();
3706 /* Set the CC. */
3707 tcg_gen_andi_i64(cc_dst, o->out, mask);
3708 set_cc_nz_u64(s, cc_dst);
3709 return DISAS_NEXT;
3712 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3714 tcg_gen_bswap16_i64(o->out, o->in2);
3715 return DISAS_NEXT;
3718 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3720 tcg_gen_bswap32_i64(o->out, o->in2);
3721 return DISAS_NEXT;
3724 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3726 tcg_gen_bswap64_i64(o->out, o->in2);
3727 return DISAS_NEXT;
3730 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3732 TCGv_i32 t1 = tcg_temp_new_i32();
3733 TCGv_i32 t2 = tcg_temp_new_i32();
3734 TCGv_i32 to = tcg_temp_new_i32();
3735 tcg_gen_extrl_i64_i32(t1, o->in1);
3736 tcg_gen_extrl_i64_i32(t2, o->in2);
3737 tcg_gen_rotl_i32(to, t1, t2);
3738 tcg_gen_extu_i32_i64(o->out, to);
3739 tcg_temp_free_i32(t1);
3740 tcg_temp_free_i32(t2);
3741 tcg_temp_free_i32(to);
3742 return DISAS_NEXT;
3745 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3747 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3748 return DISAS_NEXT;
3751 #ifndef CONFIG_USER_ONLY
3752 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3754 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3755 set_cc_static(s);
3756 return DISAS_NEXT;
3759 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3761 gen_helper_sacf(cpu_env, o->in2);
3762 /* Addressing mode has changed, so end the block. */
3763 return DISAS_PC_STALE;
3765 #endif
3767 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3769 int sam = s->insn->data;
3770 TCGv_i64 tsam;
3771 uint64_t mask;
3773 switch (sam) {
3774 case 0:
3775 mask = 0xffffff;
3776 break;
3777 case 1:
3778 mask = 0x7fffffff;
3779 break;
3780 default:
3781 mask = -1;
3782 break;
3785 /* Bizarre but true, we check the address of the current insn for the
3786 specification exception, not the next to be executed. Thus the PoO
3787 documents that Bad Things Happen two bytes before the end. */
3788 if (s->base.pc_next & ~mask) {
3789 gen_program_exception(s, PGM_SPECIFICATION);
3790 return DISAS_NORETURN;
3792 s->pc_tmp &= mask;
3794 tsam = tcg_const_i64(sam);
3795 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3796 tcg_temp_free_i64(tsam);
3798 /* Always exit the TB, since we (may have) changed execution mode. */
3799 return DISAS_PC_STALE;
3802 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3804 int r1 = get_field(s->fields, r1);
3805 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3806 return DISAS_NEXT;
3809 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3811 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3812 return DISAS_NEXT;
3815 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3817 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3818 return DISAS_NEXT;
3821 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3823 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3824 return_low128(o->out2);
3825 return DISAS_NEXT;
3828 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3830 gen_helper_sqeb(o->out, cpu_env, o->in2);
3831 return DISAS_NEXT;
3834 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3836 gen_helper_sqdb(o->out, cpu_env, o->in2);
3837 return DISAS_NEXT;
3840 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3842 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3843 return_low128(o->out2);
3844 return DISAS_NEXT;
3847 #ifndef CONFIG_USER_ONLY
3848 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3850 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3851 set_cc_static(s);
3852 return DISAS_NEXT;
3855 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3857 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3858 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3859 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3860 set_cc_static(s);
3861 tcg_temp_free_i32(r1);
3862 tcg_temp_free_i32(r3);
3863 return DISAS_NEXT;
3865 #endif
3867 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3869 DisasCompare c;
3870 TCGv_i64 a, h;
3871 TCGLabel *lab;
3872 int r1;
3874 disas_jcc(s, &c, get_field(s->fields, m3));
3876 /* We want to store when the condition is fulfilled, so branch
3877 out when it's not. */
3878 c.cond = tcg_invert_cond(c.cond);
3880 lab = gen_new_label();
3881 if (c.is_64) {
3882 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3883 } else {
3884 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3886 free_compare(&c);
3888 r1 = get_field(s->fields, r1);
3889 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3890 switch (s->insn->data) {
3891 case 1: /* STOCG */
3892 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3893 break;
3894 case 0: /* STOC */
3895 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3896 break;
3897 case 2: /* STOCFH */
3898 h = tcg_temp_new_i64();
3899 tcg_gen_shri_i64(h, regs[r1], 32);
3900 tcg_gen_qemu_st32(h, a, get_mem_index(s));
3901 tcg_temp_free_i64(h);
3902 break;
3903 default:
3904 g_assert_not_reached();
3906 tcg_temp_free_i64(a);
3908 gen_set_label(lab);
3909 return DISAS_NEXT;
3912 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3914 uint64_t sign = 1ull << s->insn->data;
3915 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3916 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3917 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3918 /* The arithmetic left shift is curious in that it does not affect
3919 the sign bit. Copy that over from the source unchanged. */
3920 tcg_gen_andi_i64(o->out, o->out, ~sign);
3921 tcg_gen_andi_i64(o->in1, o->in1, sign);
3922 tcg_gen_or_i64(o->out, o->out, o->in1);
3923 return DISAS_NEXT;
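/* Example for the 32-bit form: SLA of 0x80000001 by 1 yields
   0x80000002: the bits below the sign shift, the sign bit itself is
   copied through, and since a bit unlike the sign was shifted out the
   CC computation reports overflow (CC 3). */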
3926 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3928 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3929 return DISAS_NEXT;
3932 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3934 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3935 return DISAS_NEXT;
3938 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
3940 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3941 return DISAS_NEXT;
3944 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
3946 gen_helper_sfpc(cpu_env, o->in2);
3947 return DISAS_NEXT;
3950 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
3952 gen_helper_sfas(cpu_env, o->in2);
3953 return DISAS_NEXT;
3956 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
3958 int b2 = get_field(s->fields, b2);
3959 int d2 = get_field(s->fields, d2);
3960 TCGv_i64 t1 = tcg_temp_new_i64();
3961 TCGv_i64 t2 = tcg_temp_new_i64();
3962 int mask, pos, len;
3964 switch (s->fields->op2) {
3965 case 0x99: /* SRNM */
3966 pos = 0, len = 2;
3967 break;
3968 case 0xb8: /* SRNMB */
3969 pos = 0, len = 3;
3970 break;
3971 case 0xb9: /* SRNMT */
3972 pos = 4, len = 3;
3973 break;
3974 default:
3975 tcg_abort();
3977 mask = (1 << len) - 1;
3979 /* Insert the value into the appropriate field of the FPC. */
3980 if (b2 == 0) {
3981 tcg_gen_movi_i64(t1, d2 & mask);
3982 } else {
3983 tcg_gen_addi_i64(t1, regs[b2], d2);
3984 tcg_gen_andi_i64(t1, t1, mask);
3986 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3987 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3988 tcg_temp_free_i64(t1);
3990 /* Then install the new FPC to set the rounding mode in fpu_status. */
3991 gen_helper_sfpc(cpu_env, t2);
3992 tcg_temp_free_i64(t2);
3993 return DISAS_NEXT;
3996 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
3998 tcg_gen_extrl_i64_i32(cc_op, o->in1);
3999 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4000 set_cc_static(s);
4002 tcg_gen_shri_i64(o->in1, o->in1, 24);
4003 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4004 return DISAS_NEXT;
4007 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4009 int b1 = get_field(s->fields, b1);
4010 int d1 = get_field(s->fields, d1);
4011 int b2 = get_field(s->fields, b2);
4012 int d2 = get_field(s->fields, d2);
4013 int r3 = get_field(s->fields, r3);
4014 TCGv_i64 tmp = tcg_temp_new_i64();
4016 /* fetch all operands first */
4017 o->in1 = tcg_temp_new_i64();
4018 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4019 o->in2 = tcg_temp_new_i64();
4020 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4021 o->addr1 = get_address(s, 0, r3, 0);
4023 /* load the third operand into r3 before modifying anything */
4024 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4026 /* subtract CPU timer from first operand and store in GR0 */
4027 gen_helper_stpt(tmp, cpu_env);
4028 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4030 /* store second operand in GR1 */
4031 tcg_gen_mov_i64(regs[1], o->in2);
4033 tcg_temp_free_i64(tmp);
4034 return DISAS_NEXT;
4037 #ifndef CONFIG_USER_ONLY
4038 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4040 tcg_gen_shri_i64(o->in2, o->in2, 4);
4041 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4042 return DISAS_NEXT;
4045 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4047 gen_helper_sske(cpu_env, o->in1, o->in2);
4048 return DISAS_NEXT;
4051 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4053 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4054 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4055 return DISAS_PC_STALE_NOCHAIN;
4058 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4060 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4061 return DISAS_NEXT;
4064 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4066 gen_helper_stck(o->out, cpu_env);
4067 /* ??? We don't implement clock states. */
4068 gen_op_movi_cc(s, 0);
4069 return DISAS_NEXT;
4072 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4074 TCGv_i64 c1 = tcg_temp_new_i64();
4075 TCGv_i64 c2 = tcg_temp_new_i64();
4076 TCGv_i64 todpr = tcg_temp_new_i64();
4077 gen_helper_stck(c1, cpu_env);
4078 /* The 16-bit value is stored in a uint32_t; only the valid bits are set. */
4079 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4080 /* Shift the 64-bit value into its place as a zero-extended
4081 104-bit value. Note that "bit positions 64-103 are always
4082 non-zero so that they compare differently to STCK"; we set
4083 the least significant bit to 1. */
4084 tcg_gen_shli_i64(c2, c1, 56);
4085 tcg_gen_shri_i64(c1, c1, 8);
4086 tcg_gen_ori_i64(c2, c2, 0x10000);
4087 tcg_gen_or_i64(c2, c2, todpr);
4088 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4089 tcg_gen_addi_i64(o->in2, o->in2, 8);
4090 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4091 tcg_temp_free_i64(c1);
4092 tcg_temp_free_i64(c2);
4093 tcg_temp_free_i64(todpr);
4094 /* ??? We don't implement clock states. */
4095 gen_op_movi_cc(s, 0);
4096 return DISAS_NEXT;
4099 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4101 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4102 gen_helper_sck(cc_op, cpu_env, o->in1);
4103 set_cc_static(s);
4104 return DISAS_NEXT;
4107 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4109 gen_helper_sckc(cpu_env, o->in2);
4110 return DISAS_NEXT;
4113 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4115 gen_helper_sckpf(cpu_env, regs[0]);
4116 return DISAS_NEXT;
4119 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4121 gen_helper_stckc(o->out, cpu_env);
4122 return DISAS_NEXT;
4125 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4127 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4128 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4129 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4130 tcg_temp_free_i32(r1);
4131 tcg_temp_free_i32(r3);
4132 return DISAS_NEXT;
4135 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4137 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4138 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4139 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4140 tcg_temp_free_i32(r1);
4141 tcg_temp_free_i32(r3);
4142 return DISAS_NEXT;
4145 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4147 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4148 return DISAS_NEXT;
4151 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4153 gen_helper_spt(cpu_env, o->in2);
4154 return DISAS_NEXT;
4157 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4159 gen_helper_stfl(cpu_env);
4160 return DISAS_NEXT;
4163 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4165 gen_helper_stpt(o->out, cpu_env);
4166 return DISAS_NEXT;
4169 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4171 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4172 set_cc_static(s);
4173 return DISAS_NEXT;
4176 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4178 gen_helper_spx(cpu_env, o->in2);
4179 return DISAS_NEXT;
4182 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4184 gen_helper_xsch(cpu_env, regs[1]);
4185 set_cc_static(s);
4186 return DISAS_NEXT;
4189 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4191 gen_helper_csch(cpu_env, regs[1]);
4192 set_cc_static(s);
4193 return DISAS_NEXT;
4196 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4198 gen_helper_hsch(cpu_env, regs[1]);
4199 set_cc_static(s);
4200 return DISAS_NEXT;
4203 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4205 gen_helper_msch(cpu_env, regs[1], o->in2);
4206 set_cc_static(s);
4207 return DISAS_NEXT;
4210 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4212 gen_helper_rchp(cpu_env, regs[1]);
4213 set_cc_static(s);
4214 return DISAS_NEXT;
4217 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4219 gen_helper_rsch(cpu_env, regs[1]);
4220 set_cc_static(s);
4221 return DISAS_NEXT;
4224 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4226 gen_helper_sal(cpu_env, regs[1]);
4227 return DISAS_NEXT;
4230 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4232 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4233 return DISAS_NEXT;
4236 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4238 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4239 gen_op_movi_cc(s, 3);
4240 return DISAS_NEXT;
4243 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4245 /* The instruction is suppressed if not provided. */
4246 return DISAS_NEXT;
4249 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4251 gen_helper_ssch(cpu_env, regs[1], o->in2);
4252 set_cc_static(s);
4253 return DISAS_NEXT;
4256 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4258 gen_helper_stsch(cpu_env, regs[1], o->in2);
4259 set_cc_static(s);
4260 return DISAS_NEXT;
4263 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4265 gen_helper_stcrw(cpu_env, o->in2);
4266 set_cc_static(s);
4267 return DISAS_NEXT;
4270 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4272 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4273 set_cc_static(s);
4274 return DISAS_NEXT;
4277 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4279 gen_helper_tsch(cpu_env, regs[1], o->in2);
4280 set_cc_static(s);
4281 return DISAS_NEXT;
4284 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4286 gen_helper_chsc(cpu_env, o->in2);
4287 set_cc_static(s);
4288 return DISAS_NEXT;
4291 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4293 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4294 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4295 return DISAS_NEXT;
4298 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4300 uint64_t i2 = get_field(s->fields, i2);
4301 TCGv_i64 t;
4303 /* It is important to do what the instruction name says: STORE THEN.
4304 If we let the output hook perform the store, then on a fault and
4305 restart we'd have the wrong SYSTEM MASK in place. */
4306 t = tcg_temp_new_i64();
4307 tcg_gen_shri_i64(t, psw_mask, 56);
4308 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4309 tcg_temp_free_i64(t);
4311 if (s->fields->op == 0xac) {
4312 tcg_gen_andi_i64(psw_mask, psw_mask,
4313 (i2 << 56) | 0x00ffffffffffffffull);
4314 } else {
4315 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4318 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4319 return DISAS_PC_STALE_NOCHAIN;
4322 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4324 gen_helper_stura(cpu_env, o->in2, o->in1);
4325 return DISAS_NEXT;
4328 static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4330 gen_helper_sturg(cpu_env, o->in2, o->in1);
4331 return DISAS_NEXT;
4333 #endif
4335 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4337 gen_helper_stfle(cc_op, cpu_env, o->in2);
4338 set_cc_static(s);
4339 return DISAS_NEXT;
4342 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4344 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4345 return DISAS_NEXT;
4348 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4350 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4351 return DISAS_NEXT;
4354 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4356 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4357 return DISAS_NEXT;
4360 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4362 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4363 return DISAS_NEXT;
4366 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4368 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4369 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4370 gen_helper_stam(cpu_env, r1, o->in2, r3);
4371 tcg_temp_free_i32(r1);
4372 tcg_temp_free_i32(r3);
4373 return DISAS_NEXT;
4376 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4378 int m3 = get_field(s->fields, m3);
4379 int pos, base = s->insn->data;
4380 TCGv_i64 tmp = tcg_temp_new_i64();
4382 pos = base + ctz32(m3) * 8;
4383 switch (m3) {
4384 case 0xf:
4385 /* Effectively a 32-bit store. */
4386 tcg_gen_shri_i64(tmp, o->in1, pos);
4387 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4388 break;
4390 case 0xc:
4391 case 0x6:
4392 case 0x3:
4393 /* Effectively a 16-bit store. */
4394 tcg_gen_shri_i64(tmp, o->in1, pos);
4395 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4396 break;
4398 case 0x8:
4399 case 0x4:
4400 case 0x2:
4401 case 0x1:
4402 /* Effectively an 8-bit store. */
4403 tcg_gen_shri_i64(tmp, o->in1, pos);
4404 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4405 break;
4407 default:
4408 /* This is going to be a sequence of shifts and stores. */
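/* Illustrative trace: for m3 == 0x5 and base == 0, pos starts at 24; the
   0x8 bit is clear on the first pass, so the loop stores byte 1 of in1
   (shift 16) and then byte 3 (shift 0) at consecutive addresses -- the
   selected bytes end up contiguous in memory, as the architecture
   requires. */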
4409 pos = base + 32 - 8;
4410 while (m3) {
4411 if (m3 & 0x8) {
4412 tcg_gen_shri_i64(tmp, o->in1, pos);
4413 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4414 tcg_gen_addi_i64(o->in2, o->in2, 1);
4416 m3 = (m3 << 1) & 0xf;
4417 pos -= 8;
4419 break;
4421 tcg_temp_free_i64(tmp);
4422 return DISAS_NEXT;
4425 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4427 int r1 = get_field(s->fields, r1);
4428 int r3 = get_field(s->fields, r3);
4429 int size = s->insn->data;
4430 TCGv_i64 tsize = tcg_const_i64(size);
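/* The register range wraps modulo 16, so e.g. STM %r14,%r2 stores
   r14, r15, r0, r1, r2 in that order -- hence the "(r1 + 1) & 15"
   step below rather than a plain increment. */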
4432 while (1) {
4433 if (size == 8) {
4434 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4435 } else {
4436 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4438 if (r1 == r3) {
4439 break;
4441 tcg_gen_add_i64(o->in2, o->in2, tsize);
4442 r1 = (r1 + 1) & 15;
4445 tcg_temp_free_i64(tsize);
4446 return DISAS_NEXT;
4449 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4451 int r1 = get_field(s->fields, r1);
4452 int r3 = get_field(s->fields, r3);
4453 TCGv_i64 t = tcg_temp_new_i64();
4454 TCGv_i64 t4 = tcg_const_i64(4);
4455 TCGv_i64 t32 = tcg_const_i64(32);
4457 while (1) {
4458 tcg_gen_shl_i64(t, regs[r1], t32);
4459 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4460 if (r1 == r3) {
4461 break;
4463 tcg_gen_add_i64(o->in2, o->in2, t4);
4464 r1 = (r1 + 1) & 15;
4467 tcg_temp_free_i64(t);
4468 tcg_temp_free_i64(t4);
4469 tcg_temp_free_i64(t32);
4470 return DISAS_NEXT;
4473 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4475 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4476 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4477 } else if (HAVE_ATOMIC128) {
4478 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4479 } else {
4480 gen_helper_exit_atomic(cpu_env);
4481 return DISAS_NORETURN;
4483 return DISAS_NEXT;
4486 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4488 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4489 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4491 gen_helper_srst(cpu_env, r1, r2);
4493 tcg_temp_free_i32(r1);
4494 tcg_temp_free_i32(r2);
4495 set_cc_static(s);
4496 return DISAS_NEXT;
4499 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4501 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4502 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4504 gen_helper_srstu(cpu_env, r1, r2);
4506 tcg_temp_free_i32(r1);
4507 tcg_temp_free_i32(r2);
4508 set_cc_static(s);
4509 return DISAS_NEXT;
4512 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4514 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4515 return DISAS_NEXT;
4518 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4520 DisasCompare cmp;
4521 TCGv_i64 borrow;
4523 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4525 /* The !borrow flag is the msb of CC. Since we want the inverse of
4526 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
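/* (In the 4-bit condition mask, bit 8 selects CC=0, bit 4 CC=1, bit 2
   CC=2 and bit 1 CC=3, so 8 | 4 asks "is CC 0 or 1", i.e. whether a
   borrow occurred.) */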
4527 disas_jcc(s, &cmp, 8 | 4);
4528 borrow = tcg_temp_new_i64();
4529 if (cmp.is_64) {
4530 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4531 } else {
4532 TCGv_i32 t = tcg_temp_new_i32();
4533 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4534 tcg_gen_extu_i32_i64(borrow, t);
4535 tcg_temp_free_i32(t);
4537 free_compare(&cmp);
4539 tcg_gen_sub_i64(o->out, o->out, borrow);
4540 tcg_temp_free_i64(borrow);
4541 return DISAS_NEXT;
4544 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4546 TCGv_i32 t;
4548 update_psw_addr(s);
4549 update_cc_op(s);
4551 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4552 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4553 tcg_temp_free_i32(t);
4555 t = tcg_const_i32(s->ilen);
4556 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4557 tcg_temp_free_i32(t);
4559 gen_exception(EXCP_SVC);
4560 return DISAS_NORETURN;
4563 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4565 int cc = 0;
4567 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4568 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4569 gen_op_movi_cc(s, cc);
4570 return DISAS_NEXT;
4573 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4575 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4576 set_cc_static(s);
4577 return DISAS_NEXT;
4580 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4582 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4583 set_cc_static(s);
4584 return DISAS_NEXT;
4587 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4589 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4590 set_cc_static(s);
4591 return DISAS_NEXT;
4594 #ifndef CONFIG_USER_ONLY
4596 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4598 gen_helper_testblock(cc_op, cpu_env, o->in2);
4599 set_cc_static(s);
4600 return DISAS_NEXT;
4603 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4605 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4606 set_cc_static(s);
4607 return DISAS_NEXT;
4610 #endif
4612 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4614 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4615 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4616 tcg_temp_free_i32(l1);
4617 set_cc_static(s);
4618 return DISAS_NEXT;
4621 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4623 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4624 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4625 tcg_temp_free_i32(l);
4626 set_cc_static(s);
4627 return DISAS_NEXT;
4630 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4632 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4633 return_low128(o->out2);
4634 set_cc_static(s);
4635 return DISAS_NEXT;
4638 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4640 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4641 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4642 tcg_temp_free_i32(l);
4643 set_cc_static(s);
4644 return DISAS_NEXT;
4647 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4649 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4650 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4651 tcg_temp_free_i32(l);
4652 set_cc_static(s);
4653 return DISAS_NEXT;
4656 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4658 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4659 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4660 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4661 TCGv_i32 tst = tcg_temp_new_i32();
4662 int m3 = get_field(s->fields, m3);
4664 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4665 m3 = 0;
4667 if (m3 & 1) {
4668 tcg_gen_movi_i32(tst, -1);
4669 } else {
4670 tcg_gen_extrl_i64_i32(tst, regs[0]);
4671 if (s->insn->opc & 3) {
4672 tcg_gen_ext8u_i32(tst, tst);
4673 } else {
4674 tcg_gen_ext16u_i32(tst, tst);
4677 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4679 tcg_temp_free_i32(r1);
4680 tcg_temp_free_i32(r2);
4681 tcg_temp_free_i32(sizes);
4682 tcg_temp_free_i32(tst);
4683 set_cc_static(s);
4684 return DISAS_NEXT;
4687 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4689 TCGv_i32 t1 = tcg_const_i32(0xff);
4690 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4691 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4692 tcg_temp_free_i32(t1);
4693 set_cc_static(s);
4694 return DISAS_NEXT;
4697 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4699 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4700 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4701 tcg_temp_free_i32(l);
4702 return DISAS_NEXT;
4705 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4707 int l1 = get_field(s->fields, l1) + 1;
4708 TCGv_i32 l;
4710 /* The length must not exceed 32 bytes. */
4711 if (l1 > 32) {
4712 gen_program_exception(s, PGM_SPECIFICATION);
4713 return DISAS_NORETURN;
4715 l = tcg_const_i32(l1);
4716 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4717 tcg_temp_free_i32(l);
4718 set_cc_static(s);
4719 return DISAS_NEXT;
4722 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4724 int l1 = get_field(s->fields, l1) + 1;
4725 TCGv_i32 l;
4727 /* The length must be even and must not exceed 64 bytes. */
4728 if ((l1 & 1) || (l1 > 64)) {
4729 gen_program_exception(s, PGM_SPECIFICATION);
4730 return DISAS_NORETURN;
4732 l = tcg_const_i32(l1);
4733 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4734 tcg_temp_free_i32(l);
4735 set_cc_static(s);
4736 return DISAS_NEXT;
4740 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4742 int d1 = get_field(s->fields, d1);
4743 int d2 = get_field(s->fields, d2);
4744 int b1 = get_field(s->fields, b1);
4745 int b2 = get_field(s->fields, b2);
4746 int l = get_field(s->fields, l1);
4747 TCGv_i32 t32;
4749 o->addr1 = get_address(s, 0, b1, d1);
4751 /* If the addresses are identical, this is a store/memset of zero. */
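/* Illustrative trace: for l == 10 (11 bytes), the inline path below
   clears the destination with one 8-byte, one 2-byte and one 1-byte
   store. */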
4752 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4753 o->in2 = tcg_const_i64(0);
4755 l++;
4756 while (l >= 8) {
4757 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4758 l -= 8;
4759 if (l > 0) {
4760 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4763 if (l >= 4) {
4764 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4765 l -= 4;
4766 if (l > 0) {
4767 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4770 if (l >= 2) {
4771 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4772 l -= 2;
4773 if (l > 0) {
4774 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4777 if (l) {
4778 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4780 gen_op_movi_cc(s, 0);
4781 return DISAS_NEXT;
4784 /* But in general we'll defer to a helper. */
4785 o->in2 = get_address(s, 0, b2, d2);
4786 t32 = tcg_const_i32(l);
4787 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4788 tcg_temp_free_i32(t32);
4789 set_cc_static(s);
4790 return DISAS_NEXT;
4793 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4795 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4796 return DISAS_NEXT;
4799 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4801 int shift = s->insn->data & 0xff;
4802 int size = s->insn->data >> 8;
4803 uint64_t mask = ((1ull << size) - 1) << shift;
4805 assert(!o->g_in2);
4806 tcg_gen_shli_i64(o->in2, o->in2, shift);
4807 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4809 /* Produce the CC from only the bits manipulated. */
4810 tcg_gen_andi_i64(cc_dst, o->out, mask);
4811 set_cc_nz_u64(s, cc_dst);
4812 return DISAS_NEXT;
4815 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4817 o->in1 = tcg_temp_new_i64();
4819 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4820 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4821 } else {
4822 /* Perform the atomic operation in memory. */
4823 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4824 s->insn->data);
4827 /* Recompute also for atomic case: needed for setting CC. */
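/* (On the atomic path, the fetch-xor returned the *original* memory
   value in in1, so in1 ^ in2 recomputes exactly the value now in
   memory; the same expression also serves the non-atomic path.) */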
4828 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4830 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4831 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4833 return DISAS_NEXT;
4836 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4838 o->out = tcg_const_i64(0);
4839 return DISAS_NEXT;
4842 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4844 o->out = tcg_const_i64(0);
4845 o->out2 = o->out;
4846 o->g_out2 = true;
4847 return DISAS_NEXT;
4850 #ifndef CONFIG_USER_ONLY
4851 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4853 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4855 gen_helper_clp(cpu_env, r2);
4856 tcg_temp_free_i32(r2);
4857 set_cc_static(s);
4858 return DISAS_NEXT;
4861 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4863 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4864 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4866 gen_helper_pcilg(cpu_env, r1, r2);
4867 tcg_temp_free_i32(r1);
4868 tcg_temp_free_i32(r2);
4869 set_cc_static(s);
4870 return DISAS_NEXT;
4873 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4875 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4876 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4878 gen_helper_pcistg(cpu_env, r1, r2);
4879 tcg_temp_free_i32(r1);
4880 tcg_temp_free_i32(r2);
4881 set_cc_static(s);
4882 return DISAS_NEXT;
4885 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4887 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4888 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4890 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4891 tcg_temp_free_i32(ar);
4892 tcg_temp_free_i32(r1);
4893 set_cc_static(s);
4894 return DISAS_NEXT;
4897 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4899 gen_helper_sic(cpu_env, o->in1, o->in2);
4900 return DISAS_NEXT;
4903 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4905 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4906 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4908 gen_helper_rpcit(cpu_env, r1, r2);
4909 tcg_temp_free_i32(r1);
4910 tcg_temp_free_i32(r2);
4911 set_cc_static(s);
4912 return DISAS_NEXT;
4915 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4917 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4918 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4919 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4921 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4922 tcg_temp_free_i32(ar);
4923 tcg_temp_free_i32(r1);
4924 tcg_temp_free_i32(r3);
4925 set_cc_static(s);
4926 return DISAS_NEXT;
4929 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4931 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4932 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4934 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4935 tcg_temp_free_i32(ar);
4936 tcg_temp_free_i32(r1);
4937 set_cc_static(s);
4938 return DISAS_NEXT;
4940 #endif
4942 /* ====================================================================== */
4943 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4944 the original inputs), update the various cc data structures in order to
4945 be able to compute the new condition code. */
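/* Note that none of these compute the condition code here and now; they
   merely record a CC_OP value and up to three operands in the cc_op,
   cc_src, cc_dst and cc_vr globals, so that the CC can be materialized
   lazily, only when some later instruction actually consumes it. */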
4947 static void cout_abs32(DisasContext *s, DisasOps *o)
4949 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4952 static void cout_abs64(DisasContext *s, DisasOps *o)
4954 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4957 static void cout_adds32(DisasContext *s, DisasOps *o)
4959 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4962 static void cout_adds64(DisasContext *s, DisasOps *o)
4964 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4967 static void cout_addu32(DisasContext *s, DisasOps *o)
4969 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4972 static void cout_addu64(DisasContext *s, DisasOps *o)
4974 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4977 static void cout_addc32(DisasContext *s, DisasOps *o)
4979 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4982 static void cout_addc64(DisasContext *s, DisasOps *o)
4984 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4987 static void cout_cmps32(DisasContext *s, DisasOps *o)
4989 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4992 static void cout_cmps64(DisasContext *s, DisasOps *o)
4994 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4997 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4999 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5002 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5004 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5007 static void cout_f32(DisasContext *s, DisasOps *o)
5009 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5012 static void cout_f64(DisasContext *s, DisasOps *o)
5014 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5017 static void cout_f128(DisasContext *s, DisasOps *o)
5019 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5022 static void cout_nabs32(DisasContext *s, DisasOps *o)
5024 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5027 static void cout_nabs64(DisasContext *s, DisasOps *o)
5029 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5032 static void cout_neg32(DisasContext *s, DisasOps *o)
5034 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5037 static void cout_neg64(DisasContext *s, DisasOps *o)
5039 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5042 static void cout_nz32(DisasContext *s, DisasOps *o)
5044 tcg_gen_ext32u_i64(cc_dst, o->out);
5045 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5048 static void cout_nz64(DisasContext *s, DisasOps *o)
5050 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5053 static void cout_s32(DisasContext *s, DisasOps *o)
5055 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5058 static void cout_s64(DisasContext *s, DisasOps *o)
5060 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5063 static void cout_subs32(DisasContext *s, DisasOps *o)
5065 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5068 static void cout_subs64(DisasContext *s, DisasOps *o)
5070 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5073 static void cout_subu32(DisasContext *s, DisasOps *o)
5075 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5078 static void cout_subu64(DisasContext *s, DisasOps *o)
5080 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5083 static void cout_subb32(DisasContext *s, DisasOps *o)
5085 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5088 static void cout_subb64(DisasContext *s, DisasOps *o)
5090 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5093 static void cout_tm32(DisasContext *s, DisasOps *o)
5095 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5098 static void cout_tm64(DisasContext *s, DisasOps *o)
5100 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5103 /* ====================================================================== */
5104 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5105 with the TCG register to which we will write. Used in combination with
5106 the "wout" generators, in some cases we need a new temporary, and in
5107 some cases we can write to a TCG global. */
5109 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5111 o->out = tcg_temp_new_i64();
5113 #define SPEC_prep_new 0
5115 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5117 o->out = tcg_temp_new_i64();
5118 o->out2 = tcg_temp_new_i64();
5120 #define SPEC_prep_new_P 0
5122 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5124 o->out = regs[get_field(f, r1)];
5125 o->g_out = true;
5127 #define SPEC_prep_r1 0
5129 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5131 int r1 = get_field(f, r1);
5132 o->out = regs[r1];
5133 o->out2 = regs[r1 + 1];
5134 o->g_out = o->g_out2 = true;
5136 #define SPEC_prep_r1_P SPEC_r1_even
5138 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5139 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5141 o->out = load_freg(get_field(f, r1));
5142 o->out2 = load_freg(get_field(f, r1) + 2);
5144 #define SPEC_prep_x1 SPEC_r1_f128
5146 /* ====================================================================== */
5147 /* The "Write OUTput" generators. These generally perform some non-trivial
5148 copy of data to TCG globals, or to main memory. The trivial cases are
5149 generally handled by having a "prep" generator install the TCG global
5150 as the destination of the operation. */
5152 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5154 store_reg(get_field(f, r1), o->out);
5156 #define SPEC_wout_r1 0
5158 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5160 int r1 = get_field(f, r1);
5161 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5163 #define SPEC_wout_r1_8 0
5165 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5167 int r1 = get_field(f, r1);
5168 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5170 #define SPEC_wout_r1_16 0
5172 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5174 store_reg32_i64(get_field(f, r1), o->out);
5176 #define SPEC_wout_r1_32 0
5178 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5180 store_reg32h_i64(get_field(f, r1), o->out);
5182 #define SPEC_wout_r1_32h 0
5184 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5186 int r1 = get_field(f, r1);
5187 store_reg32_i64(r1, o->out);
5188 store_reg32_i64(r1 + 1, o->out2);
5190 #define SPEC_wout_r1_P32 SPEC_r1_even
5192 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5194 int r1 = get_field(f, r1);
5195 store_reg32_i64(r1 + 1, o->out);
5196 tcg_gen_shri_i64(o->out, o->out, 32);
5197 store_reg32_i64(r1, o->out);
5199 #define SPEC_wout_r1_D32 SPEC_r1_even
5201 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5203 int r3 = get_field(f, r3);
5204 store_reg32_i64(r3, o->out);
5205 store_reg32_i64(r3 + 1, o->out2);
5207 #define SPEC_wout_r3_P32 SPEC_r3_even
5209 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5211 int r3 = get_field(f, r3);
5212 store_reg(r3, o->out);
5213 store_reg(r3 + 1, o->out2);
5215 #define SPEC_wout_r3_P64 SPEC_r3_even
5217 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5219 store_freg32_i64(get_field(f, r1), o->out);
5221 #define SPEC_wout_e1 0
5223 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5225 store_freg(get_field(f, r1), o->out);
5227 #define SPEC_wout_f1 0
5229 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5231 int f1 = get_field(s->fields, r1);
5232 store_freg(f1, o->out);
5233 store_freg(f1 + 2, o->out2);
5235 #define SPEC_wout_x1 SPEC_r1_f128
5237 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5239 if (get_field(f, r1) != get_field(f, r2)) {
5240 store_reg32_i64(get_field(f, r1), o->out);
5243 #define SPEC_wout_cond_r1r2_32 0
5245 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5247 if (get_field(f, r1) != get_field(f, r2)) {
5248 store_freg32_i64(get_field(f, r1), o->out);
5251 #define SPEC_wout_cond_e1e2 0
5253 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5255 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5257 #define SPEC_wout_m1_8 0
5259 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5261 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5263 #define SPEC_wout_m1_16 0
5265 #ifndef CONFIG_USER_ONLY
5266 static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
5268 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5270 #define SPEC_wout_m1_16a 0
5271 #endif
5273 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5275 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5277 #define SPEC_wout_m1_32 0
5279 #ifndef CONFIG_USER_ONLY
5280 static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
5282 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5284 #define SPEC_wout_m1_32a 0
5285 #endif
5287 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5289 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5291 #define SPEC_wout_m1_64 0
5293 #ifndef CONFIG_USER_ONLY
5294 static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5296 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5298 #define SPEC_wout_m1_64a 0
5299 #endif
5301 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5303 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5305 #define SPEC_wout_m2_32 0
5307 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5309 store_reg(get_field(f, r1), o->in2);
5311 #define SPEC_wout_in2_r1 0
5313 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5315 store_reg32_i64(get_field(f, r1), o->in2);
5317 #define SPEC_wout_in2_r1_32 0
5319 /* ====================================================================== */
5320 /* The "INput 1" generators. These load the first operand to an insn. */
5322 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5324 o->in1 = load_reg(get_field(f, r1));
5326 #define SPEC_in1_r1 0
5328 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5330 o->in1 = regs[get_field(f, r1)];
5331 o->g_in1 = true;
5333 #define SPEC_in1_r1_o 0
5335 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5337 o->in1 = tcg_temp_new_i64();
5338 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5340 #define SPEC_in1_r1_32s 0
5342 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5344 o->in1 = tcg_temp_new_i64();
5345 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5347 #define SPEC_in1_r1_32u 0
5349 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5351 o->in1 = tcg_temp_new_i64();
5352 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5354 #define SPEC_in1_r1_sr32 0
5356 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5358 o->in1 = load_reg(get_field(f, r1) + 1);
5360 #define SPEC_in1_r1p1 SPEC_r1_even
5362 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5364 o->in1 = tcg_temp_new_i64();
5365 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5367 #define SPEC_in1_r1p1_32s SPEC_r1_even
5369 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5371 o->in1 = tcg_temp_new_i64();
5372 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5374 #define SPEC_in1_r1p1_32u SPEC_r1_even
5376 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5378 int r1 = get_field(f, r1);
5379 o->in1 = tcg_temp_new_i64();
5380 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5382 #define SPEC_in1_r1_D32 SPEC_r1_even
5384 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5386 o->in1 = load_reg(get_field(f, r2));
5388 #define SPEC_in1_r2 0
5390 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5392 o->in1 = tcg_temp_new_i64();
5393 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5395 #define SPEC_in1_r2_sr32 0
5397 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5399 o->in1 = load_reg(get_field(f, r3));
5401 #define SPEC_in1_r3 0
5403 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5405 o->in1 = regs[get_field(f, r3)];
5406 o->g_in1 = true;
5408 #define SPEC_in1_r3_o 0
5410 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5412 o->in1 = tcg_temp_new_i64();
5413 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5415 #define SPEC_in1_r3_32s 0
5417 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5419 o->in1 = tcg_temp_new_i64();
5420 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5422 #define SPEC_in1_r3_32u 0
5424 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5426 int r3 = get_field(f, r3);
5427 o->in1 = tcg_temp_new_i64();
5428 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5430 #define SPEC_in1_r3_D32 SPEC_r3_even
5432 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5434 o->in1 = load_freg32_i64(get_field(f, r1));
5436 #define SPEC_in1_e1 0
5438 static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5440 o->in1 = load_freg(get_field(f, r1));
5442 #define SPEC_in1_f1 0
5444 /* Load the high double word of an extended (128-bit) format FP number */
5445 static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
5447 o->in1 = load_freg(get_field(f, r2));
5449 #define SPEC_in1_x2h SPEC_r2_f128
5451 static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
5453 o->in1 = load_freg(get_field(f, r3));
5455 #define SPEC_in1_f3 0
5457 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5459 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5461 #define SPEC_in1_la1 0
5463 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5465 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5466 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5468 #define SPEC_in1_la2 0
5470 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5472 in1_la1(s, f, o);
5473 o->in1 = tcg_temp_new_i64();
5474 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5476 #define SPEC_in1_m1_8u 0
5478 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5480 in1_la1(s, f, o);
5481 o->in1 = tcg_temp_new_i64();
5482 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5484 #define SPEC_in1_m1_16s 0
5486 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5488 in1_la1(s, f, o);
5489 o->in1 = tcg_temp_new_i64();
5490 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5492 #define SPEC_in1_m1_16u 0
5494 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5496 in1_la1(s, f, o);
5497 o->in1 = tcg_temp_new_i64();
5498 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5500 #define SPEC_in1_m1_32s 0
5502 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5504 in1_la1(s, f, o);
5505 o->in1 = tcg_temp_new_i64();
5506 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5508 #define SPEC_in1_m1_32u 0
5510 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5512 in1_la1(s, f, o);
5513 o->in1 = tcg_temp_new_i64();
5514 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5516 #define SPEC_in1_m1_64 0
5518 /* ====================================================================== */
5519 /* The "INput 2" generators. These load the second operand to an insn. */
5521 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5523 o->in2 = regs[get_field(f, r1)];
5524 o->g_in2 = true;
5526 #define SPEC_in2_r1_o 0
5528 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5530 o->in2 = tcg_temp_new_i64();
5531 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5533 #define SPEC_in2_r1_16u 0
5535 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5537 o->in2 = tcg_temp_new_i64();
5538 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5540 #define SPEC_in2_r1_32u 0
5542 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5544 int r1 = get_field(f, r1);
5545 o->in2 = tcg_temp_new_i64();
5546 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5548 #define SPEC_in2_r1_D32 SPEC_r1_even
5550 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5552 o->in2 = load_reg(get_field(f, r2));
5554 #define SPEC_in2_r2 0
5556 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5558 o->in2 = regs[get_field(f, r2)];
5559 o->g_in2 = true;
5561 #define SPEC_in2_r2_o 0
5563 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5565 int r2 = get_field(f, r2);
5566 if (r2 != 0) {
5567 o->in2 = load_reg(r2);
5570 #define SPEC_in2_r2_nz 0
5572 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5574 o->in2 = tcg_temp_new_i64();
5575 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5577 #define SPEC_in2_r2_8s 0
5579 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5581 o->in2 = tcg_temp_new_i64();
5582 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5584 #define SPEC_in2_r2_8u 0
5586 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5588 o->in2 = tcg_temp_new_i64();
5589 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5591 #define SPEC_in2_r2_16s 0
5593 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5595 o->in2 = tcg_temp_new_i64();
5596 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5598 #define SPEC_in2_r2_16u 0
5600 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5602 o->in2 = load_reg(get_field(f, r3));
5604 #define SPEC_in2_r3 0
5606 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5608 o->in2 = tcg_temp_new_i64();
5609 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5611 #define SPEC_in2_r3_sr32 0
5613 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5615 o->in2 = tcg_temp_new_i64();
5616 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5618 #define SPEC_in2_r2_32s 0
5620 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5622 o->in2 = tcg_temp_new_i64();
5623 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5625 #define SPEC_in2_r2_32u 0
5627 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5629 o->in2 = tcg_temp_new_i64();
5630 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5632 #define SPEC_in2_r2_sr32 0
5634 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5636 o->in2 = load_freg32_i64(get_field(f, r2));
5638 #define SPEC_in2_e2 0
5640 static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
5642 o->in2 = load_freg(get_field(f, r2));
5644 #define SPEC_in2_f2 0
5646 /* Load the low double word of an extended (128-bit) format FP number */
5647 static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
5649 o->in2 = load_freg(get_field(f, r2) + 2);
5651 #define SPEC_in2_x2l SPEC_r2_f128
5653 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5655 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5657 #define SPEC_in2_ra2 0
5659 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5661 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5662 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5664 #define SPEC_in2_a2 0
5666 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5668 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
5670 #define SPEC_in2_ri2 0
5672 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5674 help_l2_shift(s, f, o, 31);
5676 #define SPEC_in2_sh32 0
5678 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5680 help_l2_shift(s, f, o, 63);
5682 #define SPEC_in2_sh64 0
5684 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5686 in2_a2(s, f, o);
5687 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5689 #define SPEC_in2_m2_8u 0
5691 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5693 in2_a2(s, f, o);
5694 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5696 #define SPEC_in2_m2_16s 0
5698 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5700 in2_a2(s, f, o);
5701 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5703 #define SPEC_in2_m2_16u 0
5705 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5707 in2_a2(s, f, o);
5708 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5710 #define SPEC_in2_m2_32s 0
5712 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5714 in2_a2(s, f, o);
5715 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5717 #define SPEC_in2_m2_32u 0
5719 #ifndef CONFIG_USER_ONLY
5720 static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
5722 in2_a2(s, f, o);
5723 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5725 #define SPEC_in2_m2_32ua 0
5726 #endif
5728 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5730 in2_a2(s, f, o);
5731 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5733 #define SPEC_in2_m2_64 0
5735 #ifndef CONFIG_USER_ONLY
5736 static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5738 in2_a2(s, f, o);
5739 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5741 #define SPEC_in2_m2_64a 0
5742 #endif
5744 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5746 in2_ri2(s, f, o);
5747 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5749 #define SPEC_in2_mri2_16u 0
5751 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5753 in2_ri2(s, f, o);
5754 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5756 #define SPEC_in2_mri2_32s 0
5758 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5760 in2_ri2(s, f, o);
5761 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5763 #define SPEC_in2_mri2_32u 0
5765 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5767 in2_ri2(s, f, o);
5768 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5770 #define SPEC_in2_mri2_64 0
5772 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5774 o->in2 = tcg_const_i64(get_field(f, i2));
5776 #define SPEC_in2_i2 0
5778 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5780 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5782 #define SPEC_in2_i2_8u 0
5784 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5786 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5788 #define SPEC_in2_i2_16u 0
5790 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5792 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5794 #define SPEC_in2_i2_32u 0
5796 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5798 uint64_t i2 = (uint16_t)get_field(f, i2);
5799 o->in2 = tcg_const_i64(i2 << s->insn->data);
5801 #define SPEC_in2_i2_16u_shl 0
5803 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5805 uint64_t i2 = (uint32_t)get_field(f, i2);
5806 o->in2 = tcg_const_i64(i2 << s->insn->data);
5808 #define SPEC_in2_i2_32u_shl 0
5810 #ifndef CONFIG_USER_ONLY
5811 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5813 o->in2 = tcg_const_i64(s->fields->raw_insn);
5815 #define SPEC_in2_insn 0
5816 #endif
5818 /* ====================================================================== */
5820 /* Find opc within the table of insns. This is formulated as a switch
5821 statement so that (1) we get compile-time notice of cut-paste errors
5822 for duplicated opcodes, and (2) the compiler generates the binary
5823 search tree, rather than us having to post-process the table. */
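/* For example (illustrative), an insn-data.def entry along the lines of
     C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)
   expands, under the case-generating redefinition of E further down, to
     case 0x1a00: return &insn_info[insn_AR];
   so a duplicated opcode turns into a compile-time duplicate-case error. */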
5825 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5826 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5828 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5829 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5831 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5832 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5834 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5836 enum DisasInsnEnum {
5837 #include "insn-data.def"
5840 #undef E
5841 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
5842 .opc = OPC, \
5843 .flags = FL, \
5844 .fmt = FMT_##FT, \
5845 .fac = FAC_##FC, \
5846 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5847 .name = #NM, \
5848 .help_in1 = in1_##I1, \
5849 .help_in2 = in2_##I2, \
5850 .help_prep = prep_##P, \
5851 .help_wout = wout_##W, \
5852 .help_cout = cout_##CC, \
5853 .help_op = op_##OP, \
5854 .data = D \
5857 /* Allow 0 to be used for NULL in the table below. */
5858 #define in1_0 NULL
5859 #define in2_0 NULL
5860 #define prep_0 NULL
5861 #define wout_0 NULL
5862 #define cout_0 NULL
5863 #define op_0 NULL
5865 #define SPEC_in1_0 0
5866 #define SPEC_in2_0 0
5867 #define SPEC_prep_0 0
5868 #define SPEC_wout_0 0
5870 /* Give smaller names to the various facilities. */
5871 #define FAC_Z S390_FEAT_ZARCH
5872 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5873 #define FAC_DFP S390_FEAT_DFP
5874 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5875 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5876 #define FAC_EE S390_FEAT_EXECUTE_EXT
5877 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5878 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5879 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5880 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5881 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5882 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5883 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5884 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5885 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5886 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5887 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5888 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5889 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5890 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5891 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5892 #define FAC_SFLE S390_FEAT_STFLE
5893 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5894 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5895 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5896 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5897 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5898 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5899 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5900 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5901 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5902 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5903 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5904 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5905 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5906 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
5907 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
5908 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
5910 static const DisasInsn insn_info[] = {
5911 #include "insn-data.def"
5914 #undef E
5915 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
5916 case OPC: return &insn_info[insn_ ## NM];
5918 static const DisasInsn *lookup_opc(uint16_t opc)
5920 switch (opc) {
5921 #include "insn-data.def"
5922 default:
5923 return NULL;
5927 #undef F
5928 #undef E
5929 #undef D
5930 #undef C
5932 /* Extract a field from the insn. The INSN should be left-aligned in
5933 the uint64_t so that we can more easily utilize the big-bit-endian
5934 definitions we extract from the Principles of Operation. */
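/* Example: with the insn left-aligned in the uint64_t, a 4-bit field at
   big-endian bit offset 8 (e.g. the r1 field of an RR format) comes out
   as r = (insn << 8) >> (64 - 4). */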
5936 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5938 uint32_t r, m;
5940 if (f->size == 0) {
5941 return;
5944 /* Zero extract the field from the insn. */
5945 r = (insn << f->beg) >> (64 - f->size);
5947 /* Sign-extend, or un-swap the field as necessary. */
5948 switch (f->type) {
5949 case 0: /* unsigned */
5950 break;
5951 case 1: /* signed */
5952 assert(f->size <= 32);
5953 m = 1u << (f->size - 1);
5954 r = (r ^ m) - m;
5955 break;
5956 case 2: /* dl+dh split, signed 20 bit. */
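/* The 20 contiguous bits extracted above carry DL in the upper 12 bits
   and DH in the lower 8; the expression below reassembles them as the
   signed displacement (sext8(DH) << 12) | DL. */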
5957 r = ((int8_t)r << 12) | (r >> 8);
5958 break;
5959 default:
5960 abort();
5963 /* Validate that the "compressed" encoding we selected above is valid.
5964 I.e. we haven't made two different original fields overlap. */
5965 assert(((o->presentC >> f->indexC) & 1) == 0);
5966 o->presentC |= 1 << f->indexC;
5967 o->presentO |= 1 << f->indexO;
5969 o->c[f->indexC] = r;
5972 /* Lookup the insn at the current PC, extracting the operands into O and
5973 returning the info struct for the insn. Returns NULL for invalid insn. */
5975 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5976 DisasFields *f)
5978 uint64_t insn, pc = s->base.pc_next;
5979 int op, op2, ilen;
5980 const DisasInsn *info;
5982 if (unlikely(s->ex_value)) {
5983 /* Drop the EX data now, so that it's clear on exception paths. */
5984 TCGv_i64 zero = tcg_const_i64(0);
5985 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
5986 tcg_temp_free_i64(zero);
5988 /* Extract the values saved by EXECUTE. */
5989 insn = s->ex_value & 0xffffffffffff0000ull;
5990 ilen = s->ex_value & 0xf;
5991 op = insn >> 56;
5992 } else {
5993 insn = ld_code2(env, pc);
5994 op = (insn >> 8) & 0xff;
5995 ilen = get_ilen(op);
5996 switch (ilen) {
5997 case 2:
5998 insn = insn << 48;
5999 break;
6000 case 4:
6001 insn = ld_code4(env, pc) << 32;
6002 break;
6003 case 6:
6004 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6005 break;
6006 default:
6007 g_assert_not_reached();
6010 s->pc_tmp = s->base.pc_next + ilen;
6011 s->ilen = ilen;
6013 /* We can't actually determine the insn format until we've looked up
6014 the full insn opcode, which we can't do without locating the
6015 secondary opcode. Assume by default that OP2 is at bit 40; for
6016 those smaller insns that don't actually have a secondary opcode
6017 this will correctly result in OP2 = 0. */
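/* E.g. for a 0xb2xx instruction, (insn << 8) >> 56 isolates bits 8-15
   (the second byte), and the table lookup key below becomes
   0xb2 << 8 | op2. */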
6018 switch (op) {
6019 case 0x01: /* E */
6020 case 0x80: /* S */
6021 case 0x82: /* S */
6022 case 0x93: /* S */
6023 case 0xb2: /* S, RRF, RRE, IE */
6024 case 0xb3: /* RRE, RRD, RRF */
6025 case 0xb9: /* RRE, RRF */
6026 case 0xe5: /* SSE, SIL */
6027 op2 = (insn << 8) >> 56;
6028 break;
6029 case 0xa5: /* RI */
6030 case 0xa7: /* RI */
6031 case 0xc0: /* RIL */
6032 case 0xc2: /* RIL */
6033 case 0xc4: /* RIL */
6034 case 0xc6: /* RIL */
6035 case 0xc8: /* SSF */
6036 case 0xcc: /* RIL */
6037 op2 = (insn << 12) >> 60;
6038 break;
6039 case 0xc5: /* MII */
6040 case 0xc7: /* SMI */
6041 case 0xd0 ... 0xdf: /* SS */
6042 case 0xe1: /* SS */
6043 case 0xe2: /* SS */
6044 case 0xe8: /* SS */
6045 case 0xe9: /* SS */
6046 case 0xea: /* SS */
6047 case 0xee ... 0xf3: /* SS */
6048 case 0xf8 ... 0xfd: /* SS */
6049 op2 = 0;
6050 break;
6051 default:
6052 op2 = (insn << 40) >> 56;
6053 break;
6056 memset(f, 0, sizeof(*f));
6057 f->raw_insn = insn;
6058 f->op = op;
6059 f->op2 = op2;
6061 /* Lookup the instruction. */
6062 info = lookup_opc(op << 8 | op2);
6064 /* If we found it, extract the operands. */
6065 if (info != NULL) {
6066 DisasFormat fmt = info->fmt;
6067 int i;
6069 for (i = 0; i < NUM_C_FIELD; ++i) {
6070 extract_field(f, &format_info[fmt].op[i], insn);
6073 return info;
6076 static bool is_afp_reg(int reg)
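/* ESA/390 provided only FP registers 0, 2, 4 and 6; anything odd or
   above 6 requires the additional-floating-point (AFP) facility. */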
6078 return reg % 2 || reg > 6;
6081 static bool is_fp_pair(int reg)
6083 /* 0,1,4,5,8,9,12,13: to exclude the others, check that bit 1 is clear */
6084 return !(reg & 0x2);
6087 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6089 const DisasInsn *insn;
6090 DisasJumpType ret = DISAS_NEXT;
6091 DisasFields f;
6092 DisasOps o;
6094 /* Search for the insn in the table. */
6095 insn = extract_insn(env, s, &f);
6097 /* Not found means unimplemented/illegal opcode. */
6098 if (insn == NULL) {
6099 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6100 f.op, f.op2);
6101 gen_illegal_opcode(s);
6102 return DISAS_NORETURN;
6105 #ifndef CONFIG_USER_ONLY
6106 if (s->base.tb->flags & FLAG_MASK_PER) {
6107 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6108 gen_helper_per_ifetch(cpu_env, addr);
6109 tcg_temp_free_i64(addr);
6111 #endif
6113 /* process flags */
6114 if (insn->flags) {
6115 /* privileged instruction */
6116 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6117 gen_program_exception(s, PGM_PRIVILEGED);
6118 return DISAS_NORETURN;
6121 /* if AFP is not enabled, instructions and registers are forbidden */
6122 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6123 uint8_t dxc = 0;
6125 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
6126 dxc = 1;
6128 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
6129 dxc = 1;
6131 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
6132 dxc = 1;
6134 if (insn->flags & IF_BFP) {
6135 dxc = 2;
6137 if (insn->flags & IF_DFP) {
6138 dxc = 3;
6140 if (dxc) {
6141 gen_data_exception(dxc);
6142 return DISAS_NORETURN;
6147 /* Check for insn specification exceptions. */
6148 if (insn->spec) {
6149 if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
6150 (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
6151 (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
6152 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
6153 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
6154 gen_program_exception(s, PGM_SPECIFICATION);
6155 return DISAS_NORETURN;
6159 /* Set up the structures we use to communicate with the helpers. */
6160 s->insn = insn;
6161 s->fields = &f;
6162 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
6163 o.out = NULL;
6164 o.out2 = NULL;
6165 o.in1 = NULL;
6166 o.in2 = NULL;
6167 o.addr1 = NULL;
6169 /* Implement the instruction. */
6170 if (insn->help_in1) {
6171 insn->help_in1(s, &f, &o);
6173 if (insn->help_in2) {
6174 insn->help_in2(s, &f, &o);
6176 if (insn->help_prep) {
6177 insn->help_prep(s, &f, &o);
6179 if (insn->help_op) {
6180 ret = insn->help_op(s, &o);
6182 if (ret != DISAS_NORETURN) {
6183 if (insn->help_wout) {
6184 insn->help_wout(s, &f, &o);
6186 if (insn->help_cout) {
6187 insn->help_cout(s, &o);
6191 /* Free any temporaries created by the helpers. */
6192 if (o.out && !o.g_out) {
6193 tcg_temp_free_i64(o.out);
6195 if (o.out2 && !o.g_out2) {
6196 tcg_temp_free_i64(o.out2);
6198 if (o.in1 && !o.g_in1) {
6199 tcg_temp_free_i64(o.in1);
6201 if (o.in2 && !o.g_in2) {
6202 tcg_temp_free_i64(o.in2);
6204 if (o.addr1) {
6205 tcg_temp_free_i64(o.addr1);
6208 #ifndef CONFIG_USER_ONLY
6209 if (s->base.tb->flags & FLAG_MASK_PER) {
6210 /* An exception might be triggered; save the PSW if not already done. */
6211 if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6212 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6215 /* Call the helper to check for a possible PER exception. */
6216 gen_helper_per_check_exception(cpu_env);
6218 #endif
6220 /* Advance to the next instruction. */
6221 s->base.pc_next = s->pc_tmp;
6222 return ret;
6225 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6227 DisasContext *dc = container_of(dcbase, DisasContext, base);
6229 /* 31-bit mode */
6230 if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6231 dc->base.pc_first &= 0x7fffffff;
6232 dc->base.pc_next = dc->base.pc_first;
6235 dc->cc_op = CC_OP_DYNAMIC;
6236 dc->ex_value = dc->base.tb->cs_base;
6237 dc->do_debug = dc->base.singlestep_enabled;
6240 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6244 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6246 DisasContext *dc = container_of(dcbase, DisasContext, base);
6248 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6251 static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6252 const CPUBreakpoint *bp)
6254 DisasContext *dc = container_of(dcbase, DisasContext, base);
6256 dc->base.is_jmp = DISAS_PC_STALE;
6257 dc->do_debug = true;
6258 /* The address covered by the breakpoint must be included in
6259 [tb->pc, tb->pc + tb->size) in order for it to be
6260 properly cleared -- thus we increment the PC here so that
6261 the logic setting tb->size does the right thing. */
6262 dc->base.pc_next += 2;
6263 return true;
6266 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6268 CPUS390XState *env = cs->env_ptr;
6269 DisasContext *dc = container_of(dcbase, DisasContext, base);
6271 dc->base.is_jmp = translate_one(env, dc);
6272 if (dc->base.is_jmp == DISAS_NEXT) {
6273 uint64_t page_start;
6275 page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6276 if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6277 dc->base.is_jmp = DISAS_TOO_MANY;
6282 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6284 DisasContext *dc = container_of(dcbase, DisasContext, base);
6286 switch (dc->base.is_jmp) {
6287 case DISAS_GOTO_TB:
6288 case DISAS_NORETURN:
6289 break;
6290 case DISAS_TOO_MANY:
6291 case DISAS_PC_STALE:
6292 case DISAS_PC_STALE_NOCHAIN:
6293 update_psw_addr(dc);
6294 /* FALLTHRU */
6295 case DISAS_PC_UPDATED:
6296 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6297 cc op type is in env */
6298 update_cc_op(dc);
6299 /* FALLTHRU */
6300 case DISAS_PC_CC_UPDATED:
6301 /* Exit the TB, either by raising a debug exception or by return. */
6302 if (dc->do_debug) {
6303 gen_exception(EXCP_DEBUG);
6304 } else if (use_exit_tb(dc) ||
6305 dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
6306 tcg_gen_exit_tb(NULL, 0);
6307 } else {
6308 tcg_gen_lookup_and_goto_ptr();
6310 break;
6311 default:
6312 g_assert_not_reached();
6316 static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6318 DisasContext *dc = container_of(dcbase, DisasContext, base);
6320 if (unlikely(dc->ex_value)) {
6321 /* ??? Unfortunately log_target_disas can't use host memory. */
6322 qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6323 } else {
6324 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6325 log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6329 static const TranslatorOps s390x_tr_ops = {
6330 .init_disas_context = s390x_tr_init_disas_context,
6331 .tb_start = s390x_tr_tb_start,
6332 .insn_start = s390x_tr_insn_start,
6333 .breakpoint_check = s390x_tr_breakpoint_check,
6334 .translate_insn = s390x_tr_translate_insn,
6335 .tb_stop = s390x_tr_tb_stop,
6336 .disas_log = s390x_tr_disas_log,
6339 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
6341 DisasContext dc;
6343 translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
6346 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6347 target_ulong *data)
6349 int cc_op = data[1];
6350 env->psw.addr = data[0];
6351 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6352 env->cc_op = cc_op;