s390x/tcg: implement SET CLOCK
[qemu/ar7.git] / target/s390x/translate.c
blob 57c03cbf580aab8c4b4b236ddca095a35761a9b9

/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"

/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }
    return pc;
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
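
/*
 * Note on the retxl convention: helpers that produce a 128-bit result
 * return the high 64 bits directly and leave the low 64 bits in
 * env->retxl; callers fetch them with return_low128() (see e.g. op_axb
 * and op_dxb below).
 */
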
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;

        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}

#ifndef CONFIG_USER_ONLY
static void check_privileged(DisasContext *s)
{
    if (s->base.tb->flags & FLAG_MASK_PSTATE) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->base.tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
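
/*
 * The "> 3" test assumes the CC_OP_CONST0..CC_OP_CONST3 encodings occupy
 * values 0..3: those, like DYNAMIC and STATIC, carry no live data in
 * cc_src/cc_dst/cc_vr.
 */
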
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
           (tb_cflags(s->base.tb) & CF_LAST_IO) ||
           (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
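
/*
 * Worked example: mask 8|4 enables CC 0 (equal) and CC 1 (low), so the
 * table yields TCG_COND_LE; each condition appears twice because mask
 * bit 0 (CC 3) is a don't-care for comparison results.
 */
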
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
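
/*
 * Worked example: a logic op only sets CC 0 (zero) or CC 1 (not zero),
 * so mask bit 8 yields TCG_COND_EQ, mask bit 4 yields TCG_COND_NE, and
 * the CC 2/CC 3 mask bits are don't-cares.
 */
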
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
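
/*
 * The compact slots overlap fields that never coexist in one format:
 * for instance r1, m1, b1 and i1 all share slot 0, since an instruction
 * encodes at most one of them as its first field.
 */
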
struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
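
/*
 * Usage sketch: get_field(s->fields, r1) expands to
 * get_field1(s->fields, FLD_O_r1, FLD_C_r1), i.e. assert that the
 * decoder saw an r1 field, then read it from compact slot 0.
 */
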
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
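
/*
 * Example reading of the field macros: BXD(2), used by RX-style formats,
 * takes b2 from bit 16 (4 bits), x2 from bit 12 (4 bits), and d2 from
 * bit 20 (12 bits); the classic base + index + displacement encoding.
 */
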
/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
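
/*
 * Example: a helper that operates on the register pair r1/r1+1 declares
 * SPEC_r1_even, and the decoder raises a specification exception
 * (PGM_SPECIFICATION) if such an insn is coded with an odd r1.
 */
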
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4

struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
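
/*
 * For the unsigned-add cc ops, the CC encodes the carry in its high bit
 * (CC 0/1: no carry, CC 2/3: carry), which is why op_addc below asks
 * disas_jcc() for branch mask 3 (CC 2 or 3) to recover the carry flag;
 * see the CC_OP_ADDU_* cases in disas_jcc().
 */
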
static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory.  */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->pc_tmp));
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->pc_tmp));
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}

static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}

static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
#endif

static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

2203 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2205 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2206 return DISAS_NEXT;
2209 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2211 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2212 return DISAS_NEXT;
2215 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2217 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2218 return_low128(o->out2);
2219 return DISAS_NEXT;
2222 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2224 int r2 = get_field(s->fields, r2);
2225 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2226 return DISAS_NEXT;
2229 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2231 /* No cache information provided. */
2232 tcg_gen_movi_i64(o->out, -1);
2233 return DISAS_NEXT;
2236 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2238 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2239 return DISAS_NEXT;
2242 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2244 int r1 = get_field(s->fields, r1);
2245 int r2 = get_field(s->fields, r2);
2246 TCGv_i64 t = tcg_temp_new_i64();
2248 /* Note the "subsequently" in the PoO, which implies a defined result
2249 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2250 tcg_gen_shri_i64(t, psw_mask, 32);
2251 store_reg32_i64(r1, t);
2252 if (r2 != 0) {
2253 store_reg32_i64(r2, psw_mask);
2256 tcg_temp_free_i64(t);
2257 return DISAS_NEXT;
2260 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2262 int r1 = get_field(s->fields, r1);
2263 TCGv_i32 ilen;
2264 TCGv_i64 v1;
2266 /* Nested EXECUTE is not allowed. */
2267 if (unlikely(s->ex_value)) {
2268 gen_program_exception(s, PGM_EXECUTE);
2269 return DISAS_NORETURN;
2272 update_psw_addr(s);
2273 update_cc_op(s);
2275 if (r1 == 0) {
2276 v1 = tcg_const_i64(0);
2277 } else {
2278 v1 = regs[r1];
2281 ilen = tcg_const_i32(s->ilen);
2282 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2283 tcg_temp_free_i32(ilen);
2285 if (r1 == 0) {
2286 tcg_temp_free_i64(v1);
2289 return DISAS_PC_CC_UPDATED;
2292 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2294 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2295 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2296 tcg_temp_free_i32(m3);
2297 return DISAS_NEXT;
2300 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2302 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2303 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2304 tcg_temp_free_i32(m3);
2305 return DISAS_NEXT;
2308 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2310 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2311 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2312 return_low128(o->out2);
2313 tcg_temp_free_i32(m3);
2314 return DISAS_NEXT;
2317 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2319 /* We'll use the original input for cc computation, since we get to
2320 compare that against 0, which ought to be better than comparing
2321 the real output against 64. It also lets cc_dst be a convenient
2322 temporary during our computation. */
2323 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2325 /* R1 = IN ? CLZ(IN) : 64. */
2326 tcg_gen_clzi_i64(o->out, o->in2, 64);
2328 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2329 value by 64, which is undefined. But since the shift is 64 iff the
2330 input is zero, we still get the correct result after and'ing. */
2331 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2332 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2333 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2334 return DISAS_NEXT;
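
/* ICM/ICMH/ICMY: INSERT CHARACTERS UNDER MASK.  Bytes selected by the
   M3 mask are loaded from memory into the corresponding byte positions
   of R1; the CC reflects the inserted bits. */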
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load. */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load. */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load. */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts. */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
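
/* IPM: INSERT PROGRAM MASK.  Insert the condition code and program
   mask from the PSW into the high byte of the low word of R1. */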
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}

static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
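
/* LOAD AND ADD/AND/OR/EXCLUSIVE OR (interlocked-access facility).
   Each of the following performs an atomic read-modify-write on the
   second operand and returns the original memory value in R1. */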
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}

static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
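
/* LOAD ON CONDITION.  The move into R1 happens only when the condition
   encoded in M3 is met; otherwise the destination keeps its old value. */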
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}

static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
#endif

static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
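
/* LM/LMH/LMG: LOAD MULTIPLE.  Registers R1 through R3 are loaded from
   consecutive locations at the second-operand address. */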
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers.  Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}

static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers.  Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}

static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers.  Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
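
/* LPD/LPDG: LOAD PAIR DISJOINT.  Load two disjoint operands, with the
   CC indicating whether the fetches appeared interlocked. */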
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}

static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}

static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}

static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}

static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
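
/* MVCL: MOVE LONG.  R1/R1+1 and R2/R2+1 hold the destination and
   source address/length pairs, hence the even-register requirement. */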
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return DISAS_NEXT;
}

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return DISAS_NEXT;
}

static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and must not exceed 64 bytes. */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
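
/* R[NOX]SBG: ROTATE THEN AND/OR/XOR SELECTED BITS.  Rotate R2 left by
   I5 and combine the bits selected by I3..I4 into R1, setting the CC
   from the selected result bits. */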
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}

static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return DISAS_PC_STALE;
}
#endif

static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return DISAS_PC_STALE;
}

static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
#endif
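
/* STOC/STOCG/STOCFH: STORE ON CONDITION.  The store is performed only
   when the condition encoded in M3 holds. */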
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not. */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
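
/* SLA/SLAG: SHIFT LEFT SINGLE (arithmetic).  The sign bit is not
   shifted; it is copied from the source, and the CC reflects overflow. */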
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}

static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}

static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}

static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}

static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}

static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}

static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
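    /*
     * Sketch of the mask convention assumed here (not stated in this file):
     * disas_jcc() takes the same 4-bit mask as BRANCH ON CONDITION, where
     * bit 8 selects CC=0, 4 selects CC=1, 2 selects CC=2 and 1 selects
     * CC=3.  For logical subtraction, CC=0/1 means a borrow occurred, so
     * mask 8 | 4 makes "borrow" evaluate to 1 exactly when one more unit
     * must be subtracted from the result below.
     */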
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return DISAS_NEXT;
}

static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY

static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif

static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and must not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */
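/*
 * Illustrative sketch (not part of the original source): a cout hook runs
 * after the op hook and records the op's inputs/outputs together with a
 * CC_OP_* tag, so the condition code can be computed lazily.  For a 64-bit
 * signed add, the generated sequence is effectively:
 *
 *     tcg_gen_add_i64(o->out, o->in1, o->in2);               // op hook
 *     gen_op_update3_cc_i64(s, CC_OP_ADD_64,
 *                           o->in1, o->in2, o->out);         // cout_adds64
 *
 * The CC value itself is only materialized when an instruction consumes it.
 */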
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
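/*
 * Illustrative sketch (not part of the original source): prep_new allocates
 * a scratch temporary that a later wout hook must copy to its destination,
 * while prep_r1 points o->out directly at the guest register global and
 * sets g_out so translate_one() neither frees it nor needs a wout copy:
 *
 *     o->out = tcg_temp_new_i64();         // prep_new: wout does the copy
 *
 *     o->out = regs[get_field(f, r1)];     // prep_r1: op writes r1 in place
 *     o->g_out = true;
 */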
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
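/*
 * Worked example (sketch, not part of the original source): the deposit in
 * wout_r1_8 below merges only the low byte of the result.  With
 * regs[r1] = 0x1122334455667788 and o->out ending in 0xab, the register
 * becomes 0x11223344556677ab; bits 8..63 of r1 are preserved.
 */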
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
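/*
 * Illustrative sketch (not part of the original source): in1_r1 hands the
 * op a private, clobberable copy of the register, while in1_r1_o aliases
 * the TCG global itself and marks it g_in1 so it is neither written
 * through nor freed afterwards:
 *
 *     o->in1 = load_reg(get_field(f, r1));   // mutable copy
 *
 *     o->in1 = regs[get_field(f, r1)];       // read-only alias
 *     o->g_in1 = true;
 */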
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
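/*
 * Illustrative sketch (not part of the original source): in2_a2 below forms
 * the classic base+index+displacement operand address, conceptually
 *
 *     addr = regs[b2] + regs[x2] + d2
 *
 * with get_address() assumed to treat register 0 as zero and to truncate
 * the result appropriately in 24/31-bit addressing modes.
 */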
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
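/*
 * Illustrative sketch (hypothetical entry, not quoted from insn-data.def):
 * a line in the style of
 *
 *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 *
 * expands three ways with the macros above: to the enumerator insn_AR, to
 * a DisasInsn initializer wiring up in1_r1, in2_r2, prep_new, wout_r1_32,
 * op_add and cout_adds32, and (further below) to "case 0x1a00:" returning
 * &insn_info[insn_AR] from lookup_opc().
 */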
/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
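/*
 * Worked example (sketch, not part of the original source): with the insn
 * left-aligned, a 4-bit field starting at big-endian bit 8 (f->beg = 8,
 * f->size = 4) is extracted by the shift pair below as
 *
 *     r = (insn << 8) >> (64 - 4);
 *
 * i.e. discard the bits above the field, then bring its top-aligned bits
 * down so only the four field bits remain.
 */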
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;
    /* We can't actually determine the insn format until we've looked up
       the full insn opcode, which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
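    /*
     * Worked example (sketch, not part of the original source): a 4-byte
     * RRE insn 0xb904xxxx is left-aligned to 0xb904xxxx00000000, so the
     * "(insn << 8) >> 56" case below isolates bits 8..15 and yields
     * op2 = 0x04, making the lookup key (op << 8 | op2) = 0xb904.
     */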
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }

    return info;
}
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return DISAS_NORETURN;
        }
    }
    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    o.out = NULL;
    o.out2 = NULL;
    o.in1 = NULL;
    o.in2 = NULL;
    o.addr1 = NULL;

    /* Implement the instruction.  */
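    /*
     * Hook order, matching the calls below: load operands (in1, in2),
     * prepare the destination (prep), emit the operation (op), write
     * results back (wout), then record the condition code (cout).  Every
     * hook is optional; the insn table leaves unused ones NULL.
     */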
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered; save the PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
}
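/*
 * Note (sketch, not part of the original source): the data[] consumed by
 * restore_state_to_opc() below mirrors the two values recorded per insn by
 * tcg_gen_insn_start(pc_next, cc_op) in s390x_tr_insn_start(), which is how
 * the PSW address and cc_op are recovered when unwinding from a fault
 * mid-TB.
 */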
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}