target/s390x: convert to DisasJumpType
[qemu/kevin.git] / target/s390x/translate.c
blob d08c109fe1fceced66a4b39c9b17ab380bf465ef
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
45 #include "exec/translator.h"
46 #include "exec/log.h"
49 /* Information that (most) every instruction needs to manipulate. */
50 typedef struct DisasContext DisasContext;
51 typedef struct DisasInsn DisasInsn;
52 typedef struct DisasFields DisasFields;
54 struct DisasContext {
55 struct TranslationBlock *tb;
56 const DisasInsn *insn;
57 DisasFields *fields;
58 uint64_t ex_value;
59 uint64_t pc, next_pc;
60 uint32_t ilen;
61 enum cc_op cc_op;
62 bool singlestep_enabled;
65 /* Information carried about a condition to be evaluated. */
66 typedef struct {
67 TCGCond cond:8;
68 bool is_64;
69 bool g1;
70 bool g2;
71 union {
72 struct { TCGv_i64 a, b; } s64;
73 struct { TCGv_i32 a, b; } s32;
74 } u;
75 } DisasCompare;
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
82 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
84 if (!(s->tb->flags & FLAG_MASK_64)) {
85 if (s->tb->flags & FLAG_MASK_32) {
86 return pc | 0x80000000;
89 return pc;
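/* For example, with FLAG_MASK_32 set (31-bit mode) a return address of
   0x01000000 is stored as 0x81000000: the top bit records the 31-bit
   addressing mode, as BAL/BAS require.  In 64-bit mode the address is
   used unchanged. */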
92 static TCGv_i64 psw_addr;
93 static TCGv_i64 psw_mask;
94 static TCGv_i64 gbea;
96 static TCGv_i32 cc_op;
97 static TCGv_i64 cc_src;
98 static TCGv_i64 cc_dst;
99 static TCGv_i64 cc_vr;
101 static char cpu_reg_names[32][4];
102 static TCGv_i64 regs[16];
103 static TCGv_i64 fregs[16];
105 void s390x_translate_init(void)
107 int i;
109 psw_addr = tcg_global_mem_new_i64(cpu_env,
110 offsetof(CPUS390XState, psw.addr),
111 "psw_addr");
112 psw_mask = tcg_global_mem_new_i64(cpu_env,
113 offsetof(CPUS390XState, psw.mask),
114 "psw_mask");
115 gbea = tcg_global_mem_new_i64(cpu_env,
116 offsetof(CPUS390XState, gbea),
117 "gbea");
119 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
120 "cc_op");
121 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
122 "cc_src");
123 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
124 "cc_dst");
125 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
126 "cc_vr");
128 for (i = 0; i < 16; i++) {
129 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
130 regs[i] = tcg_global_mem_new(cpu_env,
131 offsetof(CPUS390XState, regs[i]),
132 cpu_reg_names[i]);
135 for (i = 0; i < 16; i++) {
136 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
137 fregs[i] = tcg_global_mem_new(cpu_env,
138 offsetof(CPUS390XState, vregs[i][0].d),
139 cpu_reg_names[i + 16]);
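/* Note that the 16 FP registers alias element 0 of the corresponding
   vector registers, hence the vregs[i][0].d offsets above. */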
143 static TCGv_i64 load_reg(int reg)
145 TCGv_i64 r = tcg_temp_new_i64();
146 tcg_gen_mov_i64(r, regs[reg]);
147 return r;
150 static TCGv_i64 load_freg32_i64(int reg)
152 TCGv_i64 r = tcg_temp_new_i64();
153 tcg_gen_shri_i64(r, fregs[reg], 32);
154 return r;
157 static void store_reg(int reg, TCGv_i64 v)
159 tcg_gen_mov_i64(regs[reg], v);
162 static void store_freg(int reg, TCGv_i64 v)
164 tcg_gen_mov_i64(fregs[reg], v);
167 static void store_reg32_i64(int reg, TCGv_i64 v)
169 /* 32 bit register writes keep the upper half */
170 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
173 static void store_reg32h_i64(int reg, TCGv_i64 v)
175 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
178 static void store_freg32_i64(int reg, TCGv_i64 v)
180 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
183 static void return_low128(TCGv_i64 dest)
185 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
188 static void update_psw_addr(DisasContext *s)
190 /* psw.addr */
191 tcg_gen_movi_i64(psw_addr, s->pc);
194 static void per_branch(DisasContext *s, bool to_next)
196 #ifndef CONFIG_USER_ONLY
197 tcg_gen_movi_i64(gbea, s->pc);
199 if (s->tb->flags & FLAG_MASK_PER) {
200 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
201 gen_helper_per_branch(cpu_env, gbea, next_pc);
202 if (to_next) {
203 tcg_temp_free_i64(next_pc);
206 #endif
209 static void per_branch_cond(DisasContext *s, TCGCond cond,
210 TCGv_i64 arg1, TCGv_i64 arg2)
212 #ifndef CONFIG_USER_ONLY
213 if (s->tb->flags & FLAG_MASK_PER) {
214 TCGLabel *lab = gen_new_label();
215 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
217 tcg_gen_movi_i64(gbea, s->pc);
218 gen_helper_per_branch(cpu_env, gbea, psw_addr);
220 gen_set_label(lab);
221 } else {
222 TCGv_i64 pc = tcg_const_i64(s->pc);
223 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
224 tcg_temp_free_i64(pc);
226 #endif
229 static void per_breaking_event(DisasContext *s)
231 tcg_gen_movi_i64(gbea, s->pc);
234 static void update_cc_op(DisasContext *s)
236 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
237 tcg_gen_movi_i32(cc_op, s->cc_op);
241 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
243 return (uint64_t)cpu_lduw_code(env, pc);
246 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
248 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
251 static int get_mem_index(DisasContext *s)
253 if (!(s->tb->flags & FLAG_MASK_DAT)) {
254 return MMU_REAL_IDX;
257 switch (s->tb->flags & FLAG_MASK_ASC) {
258 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
259 return MMU_PRIMARY_IDX;
260 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
261 return MMU_SECONDARY_IDX;
262 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
263 return MMU_HOME_IDX;
264 default:
265 tcg_abort();
266 break;
270 static void gen_exception(int excp)
272 TCGv_i32 tmp = tcg_const_i32(excp);
273 gen_helper_exception(cpu_env, tmp);
274 tcg_temp_free_i32(tmp);
277 static void gen_program_exception(DisasContext *s, int code)
279 TCGv_i32 tmp;
281     /* Remember what pgm exception this was.  */
282 tmp = tcg_const_i32(code);
283 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
284 tcg_temp_free_i32(tmp);
286 tmp = tcg_const_i32(s->ilen);
287 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
288 tcg_temp_free_i32(tmp);
290 /* update the psw */
291 update_psw_addr(s);
293 /* Save off cc. */
294 update_cc_op(s);
296 /* Trigger exception. */
297 gen_exception(EXCP_PGM);
300 static inline void gen_illegal_opcode(DisasContext *s)
302 gen_program_exception(s, PGM_OPERATION);
305 static inline void gen_trap(DisasContext *s)
307 TCGv_i32 t;
309 /* Set DXC to 0xff. */
310 t = tcg_temp_new_i32();
311 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
312 tcg_gen_ori_i32(t, t, 0xff00);
313 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
314 tcg_temp_free_i32(t);
316 gen_program_exception(s, PGM_DATA);
319 #ifndef CONFIG_USER_ONLY
320 static void check_privileged(DisasContext *s)
322 if (s->tb->flags & FLAG_MASK_PSTATE) {
323 gen_program_exception(s, PGM_PRIVILEGED);
326 #endif
328 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
330 TCGv_i64 tmp = tcg_temp_new_i64();
331 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
333 /* Note that d2 is limited to 20 bits, signed. If we crop negative
334        displacements early we create larger immediate addends.  */
336 /* Note that addi optimizes the imm==0 case. */
337 if (b2 && x2) {
338 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
339 tcg_gen_addi_i64(tmp, tmp, d2);
340 } else if (b2) {
341 tcg_gen_addi_i64(tmp, regs[b2], d2);
342 } else if (x2) {
343 tcg_gen_addi_i64(tmp, regs[x2], d2);
344 } else {
345 if (need_31) {
346 d2 &= 0x7fffffff;
347 need_31 = false;
349 tcg_gen_movi_i64(tmp, d2);
351 if (need_31) {
352 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
355 return tmp;
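/* For example, an RX operand such as 0x100(%r2,%r3) computes
   regs[3] + regs[2] + 0x100 here, and the result is truncated to
   31 bits whenever the PSW is not in 64-bit mode. */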
358 static inline bool live_cc_data(DisasContext *s)
360 return (s->cc_op != CC_OP_DYNAMIC
361 && s->cc_op != CC_OP_STATIC
362 && s->cc_op > 3);
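/* The cc_op > 3 test relies on CC_OP_CONST0..CC_OP_CONST3 being the
   first four enumerators: for those the cc value lives in the op
   itself, so no cc data registers are live either. */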
365 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
367 if (live_cc_data(s)) {
368 tcg_gen_discard_i64(cc_src);
369 tcg_gen_discard_i64(cc_dst);
370 tcg_gen_discard_i64(cc_vr);
372 s->cc_op = CC_OP_CONST0 + val;
375 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
377 if (live_cc_data(s)) {
378 tcg_gen_discard_i64(cc_src);
379 tcg_gen_discard_i64(cc_vr);
381 tcg_gen_mov_i64(cc_dst, dst);
382 s->cc_op = op;
385 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
386 TCGv_i64 dst)
388 if (live_cc_data(s)) {
389 tcg_gen_discard_i64(cc_vr);
391 tcg_gen_mov_i64(cc_src, src);
392 tcg_gen_mov_i64(cc_dst, dst);
393 s->cc_op = op;
396 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
397 TCGv_i64 dst, TCGv_i64 vr)
399 tcg_gen_mov_i64(cc_src, src);
400 tcg_gen_mov_i64(cc_dst, dst);
401 tcg_gen_mov_i64(cc_vr, vr);
402 s->cc_op = op;
405 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
407 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
410 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
412 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
415 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
417 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
420 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
422 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
425 /* CC value is in env->cc_op */
426 static void set_cc_static(DisasContext *s)
428 if (live_cc_data(s)) {
429 tcg_gen_discard_i64(cc_src);
430 tcg_gen_discard_i64(cc_dst);
431 tcg_gen_discard_i64(cc_vr);
433 s->cc_op = CC_OP_STATIC;
436 /* calculates cc into cc_op */
437 static void gen_op_calc_cc(DisasContext *s)
439 TCGv_i32 local_cc_op = NULL;
440 TCGv_i64 dummy = NULL;
442 switch (s->cc_op) {
443 default:
444 dummy = tcg_const_i64(0);
445 /* FALLTHRU */
446 case CC_OP_ADD_64:
447 case CC_OP_ADDU_64:
448 case CC_OP_ADDC_64:
449 case CC_OP_SUB_64:
450 case CC_OP_SUBU_64:
451 case CC_OP_SUBB_64:
452 case CC_OP_ADD_32:
453 case CC_OP_ADDU_32:
454 case CC_OP_ADDC_32:
455 case CC_OP_SUB_32:
456 case CC_OP_SUBU_32:
457 case CC_OP_SUBB_32:
458 local_cc_op = tcg_const_i32(s->cc_op);
459 break;
460 case CC_OP_CONST0:
461 case CC_OP_CONST1:
462 case CC_OP_CONST2:
463 case CC_OP_CONST3:
464 case CC_OP_STATIC:
465 case CC_OP_DYNAMIC:
466 break;
469 switch (s->cc_op) {
470 case CC_OP_CONST0:
471 case CC_OP_CONST1:
472 case CC_OP_CONST2:
473 case CC_OP_CONST3:
474 /* s->cc_op is the cc value */
475 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
476 break;
477 case CC_OP_STATIC:
478 /* env->cc_op already is the cc value */
479 break;
480 case CC_OP_NZ:
481 case CC_OP_ABS_64:
482 case CC_OP_NABS_64:
483 case CC_OP_ABS_32:
484 case CC_OP_NABS_32:
485 case CC_OP_LTGT0_32:
486 case CC_OP_LTGT0_64:
487 case CC_OP_COMP_32:
488 case CC_OP_COMP_64:
489 case CC_OP_NZ_F32:
490 case CC_OP_NZ_F64:
491 case CC_OP_FLOGR:
492 /* 1 argument */
493 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
494 break;
495 case CC_OP_ICM:
496 case CC_OP_LTGT_32:
497 case CC_OP_LTGT_64:
498 case CC_OP_LTUGTU_32:
499 case CC_OP_LTUGTU_64:
500 case CC_OP_TM_32:
501 case CC_OP_TM_64:
502 case CC_OP_SLA_32:
503 case CC_OP_SLA_64:
504 case CC_OP_NZ_F128:
505 /* 2 arguments */
506 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
507 break;
508 case CC_OP_ADD_64:
509 case CC_OP_ADDU_64:
510 case CC_OP_ADDC_64:
511 case CC_OP_SUB_64:
512 case CC_OP_SUBU_64:
513 case CC_OP_SUBB_64:
514 case CC_OP_ADD_32:
515 case CC_OP_ADDU_32:
516 case CC_OP_ADDC_32:
517 case CC_OP_SUB_32:
518 case CC_OP_SUBU_32:
519 case CC_OP_SUBB_32:
520 /* 3 arguments */
521 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
522 break;
523 case CC_OP_DYNAMIC:
524 /* unknown operation - assume 3 arguments and cc_op in env */
525 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
526 break;
527 default:
528 tcg_abort();
531 if (local_cc_op) {
532 tcg_temp_free_i32(local_cc_op);
534 if (dummy) {
535 tcg_temp_free_i64(dummy);
538 /* We now have cc in cc_op as constant */
539 set_cc_static(s);
542 static bool use_exit_tb(DisasContext *s)
544 return (s->singlestep_enabled ||
545 (tb_cflags(s->tb) & CF_LAST_IO) ||
546 (s->tb->flags & FLAG_MASK_PER));
549 static bool use_goto_tb(DisasContext *s, uint64_t dest)
551 if (unlikely(use_exit_tb(s))) {
552 return false;
554 #ifndef CONFIG_USER_ONLY
555 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
556 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
557 #else
558 return true;
559 #endif
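/* Direct block chaining is only safe while the destination stays on a
   page this TB already covers; for any other target the guest mapping
   could change underneath us, so we must take an indirect exit.
   User-mode mappings are stable, hence the unconditional "true". */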
562 static void account_noninline_branch(DisasContext *s, int cc_op)
564 #ifdef DEBUG_INLINE_BRANCHES
565 inline_branch_miss[cc_op]++;
566 #endif
569 static void account_inline_branch(DisasContext *s, int cc_op)
571 #ifdef DEBUG_INLINE_BRANCHES
572 inline_branch_hit[cc_op]++;
573 #endif
576 /* Table of mask values to comparison codes, given a comparison as input.
577 For such, CC=3 should not be possible. */
578 static const TCGCond ltgt_cond[16] = {
579 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
580 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
581 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
582 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
583 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
584 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
585 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
586 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
589 /* Table of mask values to comparison codes, given a logic op as input.
590 For such, only CC=0 and CC=1 should be possible. */
591 static const TCGCond nz_cond[16] = {
592 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
593 TCG_COND_NEVER, TCG_COND_NEVER,
594 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
595 TCG_COND_NE, TCG_COND_NE,
596 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
597 TCG_COND_EQ, TCG_COND_EQ,
598 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
599 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
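/* The four mask bits select CC values 0..3, most significant first:
   bit 8 matches CC=0, bit 4 CC=1, bit 2 CC=2, bit 1 CC=3.  E.g. after
   a signed compare, mask 8|4 means "CC 0 or 1", i.e. op1 <= op2, which
   is why index 12 of ltgt_cond above holds TCG_COND_LE. */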
602 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
603 details required to generate a TCG comparison. */
604 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
606 TCGCond cond;
607 enum cc_op old_cc_op = s->cc_op;
609 if (mask == 15 || mask == 0) {
610 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
611 c->u.s32.a = cc_op;
612 c->u.s32.b = cc_op;
613 c->g1 = c->g2 = true;
614 c->is_64 = false;
615 return;
618 /* Find the TCG condition for the mask + cc op. */
619 switch (old_cc_op) {
620 case CC_OP_LTGT0_32:
621 case CC_OP_LTGT0_64:
622 case CC_OP_LTGT_32:
623 case CC_OP_LTGT_64:
624 cond = ltgt_cond[mask];
625 if (cond == TCG_COND_NEVER) {
626 goto do_dynamic;
628 account_inline_branch(s, old_cc_op);
629 break;
631 case CC_OP_LTUGTU_32:
632 case CC_OP_LTUGTU_64:
633 cond = tcg_unsigned_cond(ltgt_cond[mask]);
634 if (cond == TCG_COND_NEVER) {
635 goto do_dynamic;
637 account_inline_branch(s, old_cc_op);
638 break;
640 case CC_OP_NZ:
641 cond = nz_cond[mask];
642 if (cond == TCG_COND_NEVER) {
643 goto do_dynamic;
645 account_inline_branch(s, old_cc_op);
646 break;
648 case CC_OP_TM_32:
649 case CC_OP_TM_64:
650 switch (mask) {
651 case 8:
652 cond = TCG_COND_EQ;
653 break;
654 case 4 | 2 | 1:
655 cond = TCG_COND_NE;
656 break;
657 default:
658 goto do_dynamic;
660 account_inline_branch(s, old_cc_op);
661 break;
663 case CC_OP_ICM:
664 switch (mask) {
665 case 8:
666 cond = TCG_COND_EQ;
667 break;
668 case 4 | 2 | 1:
669 case 4 | 2:
670 cond = TCG_COND_NE;
671 break;
672 default:
673 goto do_dynamic;
675 account_inline_branch(s, old_cc_op);
676 break;
678 case CC_OP_FLOGR:
679 switch (mask & 0xa) {
680 case 8: /* src == 0 -> no one bit found */
681 cond = TCG_COND_EQ;
682 break;
683 case 2: /* src != 0 -> one bit found */
684 cond = TCG_COND_NE;
685 break;
686 default:
687 goto do_dynamic;
689 account_inline_branch(s, old_cc_op);
690 break;
692 case CC_OP_ADDU_32:
693 case CC_OP_ADDU_64:
694 switch (mask) {
695 case 8 | 2: /* vr == 0 */
696 cond = TCG_COND_EQ;
697 break;
698 case 4 | 1: /* vr != 0 */
699 cond = TCG_COND_NE;
700 break;
701 case 8 | 4: /* no carry -> vr >= src */
702 cond = TCG_COND_GEU;
703 break;
704 case 2 | 1: /* carry -> vr < src */
705 cond = TCG_COND_LTU;
706 break;
707 default:
708 goto do_dynamic;
710 account_inline_branch(s, old_cc_op);
711 break;
713 case CC_OP_SUBU_32:
714 case CC_OP_SUBU_64:
715         /* Note that CC=0 is impossible; treat it as don't-care. */
716 switch (mask & 7) {
717 case 2: /* zero -> op1 == op2 */
718 cond = TCG_COND_EQ;
719 break;
720 case 4 | 1: /* !zero -> op1 != op2 */
721 cond = TCG_COND_NE;
722 break;
723 case 4: /* borrow (!carry) -> op1 < op2 */
724 cond = TCG_COND_LTU;
725 break;
726 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
727 cond = TCG_COND_GEU;
728 break;
729 default:
730 goto do_dynamic;
732 account_inline_branch(s, old_cc_op);
733 break;
735 default:
736 do_dynamic:
737 /* Calculate cc value. */
738 gen_op_calc_cc(s);
739 /* FALLTHRU */
741 case CC_OP_STATIC:
742 /* Jump based on CC. We'll load up the real cond below;
743 the assignment here merely avoids a compiler warning. */
744 account_noninline_branch(s, old_cc_op);
745 old_cc_op = CC_OP_STATIC;
746 cond = TCG_COND_NEVER;
747 break;
750 /* Load up the arguments of the comparison. */
751 c->is_64 = true;
752 c->g1 = c->g2 = false;
753 switch (old_cc_op) {
754 case CC_OP_LTGT0_32:
755 c->is_64 = false;
756 c->u.s32.a = tcg_temp_new_i32();
757 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
758 c->u.s32.b = tcg_const_i32(0);
759 break;
760 case CC_OP_LTGT_32:
761 case CC_OP_LTUGTU_32:
762 case CC_OP_SUBU_32:
763 c->is_64 = false;
764 c->u.s32.a = tcg_temp_new_i32();
765 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
766 c->u.s32.b = tcg_temp_new_i32();
767 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
768 break;
770 case CC_OP_LTGT0_64:
771 case CC_OP_NZ:
772 case CC_OP_FLOGR:
773 c->u.s64.a = cc_dst;
774 c->u.s64.b = tcg_const_i64(0);
775 c->g1 = true;
776 break;
777 case CC_OP_LTGT_64:
778 case CC_OP_LTUGTU_64:
779 case CC_OP_SUBU_64:
780 c->u.s64.a = cc_src;
781 c->u.s64.b = cc_dst;
782 c->g1 = c->g2 = true;
783 break;
785 case CC_OP_TM_32:
786 case CC_OP_TM_64:
787 case CC_OP_ICM:
788 c->u.s64.a = tcg_temp_new_i64();
789 c->u.s64.b = tcg_const_i64(0);
790 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
791 break;
793 case CC_OP_ADDU_32:
794 c->is_64 = false;
795 c->u.s32.a = tcg_temp_new_i32();
796 c->u.s32.b = tcg_temp_new_i32();
797 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
798 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
799 tcg_gen_movi_i32(c->u.s32.b, 0);
800 } else {
801 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
803 break;
805 case CC_OP_ADDU_64:
806 c->u.s64.a = cc_vr;
807 c->g1 = true;
808 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
809 c->u.s64.b = tcg_const_i64(0);
810 } else {
811 c->u.s64.b = cc_src;
812 c->g2 = true;
814 break;
816 case CC_OP_STATIC:
817 c->is_64 = false;
818 c->u.s32.a = cc_op;
819 c->g1 = true;
820 switch (mask) {
821 case 0x8 | 0x4 | 0x2: /* cc != 3 */
822 cond = TCG_COND_NE;
823 c->u.s32.b = tcg_const_i32(3);
824 break;
825 case 0x8 | 0x4 | 0x1: /* cc != 2 */
826 cond = TCG_COND_NE;
827 c->u.s32.b = tcg_const_i32(2);
828 break;
829 case 0x8 | 0x2 | 0x1: /* cc != 1 */
830 cond = TCG_COND_NE;
831 c->u.s32.b = tcg_const_i32(1);
832 break;
833 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
834 cond = TCG_COND_EQ;
835 c->g1 = false;
836 c->u.s32.a = tcg_temp_new_i32();
837 c->u.s32.b = tcg_const_i32(0);
838 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
839 break;
840 case 0x8 | 0x4: /* cc < 2 */
841 cond = TCG_COND_LTU;
842 c->u.s32.b = tcg_const_i32(2);
843 break;
844 case 0x8: /* cc == 0 */
845 cond = TCG_COND_EQ;
846 c->u.s32.b = tcg_const_i32(0);
847 break;
848 case 0x4 | 0x2 | 0x1: /* cc != 0 */
849 cond = TCG_COND_NE;
850 c->u.s32.b = tcg_const_i32(0);
851 break;
852 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
853 cond = TCG_COND_NE;
854 c->g1 = false;
855 c->u.s32.a = tcg_temp_new_i32();
856 c->u.s32.b = tcg_const_i32(0);
857 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
858 break;
859 case 0x4: /* cc == 1 */
860 cond = TCG_COND_EQ;
861 c->u.s32.b = tcg_const_i32(1);
862 break;
863 case 0x2 | 0x1: /* cc > 1 */
864 cond = TCG_COND_GTU;
865 c->u.s32.b = tcg_const_i32(1);
866 break;
867 case 0x2: /* cc == 2 */
868 cond = TCG_COND_EQ;
869 c->u.s32.b = tcg_const_i32(2);
870 break;
871 case 0x1: /* cc == 3 */
872 cond = TCG_COND_EQ;
873 c->u.s32.b = tcg_const_i32(3);
874 break;
875 default:
876 /* CC is masked by something else: (8 >> cc) & mask. */
877 cond = TCG_COND_NE;
878 c->g1 = false;
879 c->u.s32.a = tcg_const_i32(8);
880 c->u.s32.b = tcg_const_i32(0);
881 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
882 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
883 break;
885 break;
887 default:
888 abort();
890 c->cond = cond;
893 static void free_compare(DisasCompare *c)
895 if (!c->g1) {
896 if (c->is_64) {
897 tcg_temp_free_i64(c->u.s64.a);
898 } else {
899 tcg_temp_free_i32(c->u.s32.a);
902 if (!c->g2) {
903 if (c->is_64) {
904 tcg_temp_free_i64(c->u.s64.b);
905 } else {
906 tcg_temp_free_i32(c->u.s32.b);
911 /* ====================================================================== */
912 /* Define the insn format enumeration. */
913 #define F0(N) FMT_##N,
914 #define F1(N, X1) F0(N)
915 #define F2(N, X1, X2) F0(N)
916 #define F3(N, X1, X2, X3) F0(N)
917 #define F4(N, X1, X2, X3, X4) F0(N)
918 #define F5(N, X1, X2, X3, X4, X5) F0(N)
920 typedef enum {
921 #include "insn-format.def"
922 } DisasFormat;
924 #undef F0
925 #undef F1
926 #undef F2
927 #undef F3
928 #undef F4
929 #undef F5
931 /* Define a structure to hold the decoded fields. We'll store each inside
932 an array indexed by an enum. In order to conserve memory, we'll arrange
933 for fields that do not exist at the same time to overlap, thus the "C"
934 for compact. For checking purposes there is an "O" for original index
935 as well that will be applied to availability bitmaps. */
937 enum DisasFieldIndexO {
938 FLD_O_r1,
939 FLD_O_r2,
940 FLD_O_r3,
941 FLD_O_m1,
942 FLD_O_m3,
943 FLD_O_m4,
944 FLD_O_b1,
945 FLD_O_b2,
946 FLD_O_b4,
947 FLD_O_d1,
948 FLD_O_d2,
949 FLD_O_d4,
950 FLD_O_x2,
951 FLD_O_l1,
952 FLD_O_l2,
953 FLD_O_i1,
954 FLD_O_i2,
955 FLD_O_i3,
956 FLD_O_i4,
957 FLD_O_i5
960 enum DisasFieldIndexC {
961 FLD_C_r1 = 0,
962 FLD_C_m1 = 0,
963 FLD_C_b1 = 0,
964 FLD_C_i1 = 0,
966 FLD_C_r2 = 1,
967 FLD_C_b2 = 1,
968 FLD_C_i2 = 1,
970 FLD_C_r3 = 2,
971 FLD_C_m3 = 2,
972 FLD_C_i3 = 2,
974 FLD_C_m4 = 3,
975 FLD_C_b4 = 3,
976 FLD_C_i4 = 3,
977 FLD_C_l1 = 3,
979 FLD_C_i5 = 4,
980 FLD_C_d1 = 4,
982 FLD_C_d2 = 5,
984 FLD_C_d4 = 6,
985 FLD_C_x2 = 6,
986 FLD_C_l2 = 6,
988 NUM_C_FIELD = 7
991 struct DisasFields {
992 uint64_t raw_insn;
993 unsigned op:8;
994 unsigned op2:8;
995 unsigned presentC:16;
996 unsigned int presentO;
997 int c[NUM_C_FIELD];
1000 /* This is the way fields are to be accessed out of DisasFields. */
1001 #define have_field(S, F) have_field1((S), FLD_O_##F)
1002 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1004 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1006 return (f->presentO >> c) & 1;
1009 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1010 enum DisasFieldIndexC c)
1012 assert(have_field1(f, o));
1013 return f->c[c];
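/* Typical use, as in the operations below:
 *     int r1 = get_field(s->fields, r1);
 * expands to get_field1(s->fields, FLD_O_r1, FLD_C_r1) and asserts at
 * translation time that the decoded format really has an r1 field. */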
1016 /* Describe the layout of each field in each format. */
1017 typedef struct DisasField {
1018 unsigned int beg:8;
1019 unsigned int size:8;
1020 unsigned int type:2;
1021 unsigned int indexC:6;
1022 enum DisasFieldIndexO indexO:8;
1023 } DisasField;
1025 typedef struct DisasFormatInfo {
1026 DisasField op[NUM_C_FIELD];
1027 } DisasFormatInfo;
1029 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1030 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1031 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1032 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1033 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1035 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1036 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1037 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1038 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1039 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1040 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1041 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1042 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1044 #define F0(N) { { } },
1045 #define F1(N, X1) { { X1 } },
1046 #define F2(N, X1, X2) { { X1, X2 } },
1047 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1048 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1049 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
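/* Each F<n> line in insn-format.def now expands to one DisasFormatInfo
   entry; e.g. a format declared with R(1,8) and BXD(2) yields four
   DisasField descriptors: r1 at bit 8 plus the b2/x2/d2 triple. */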
1051 static const DisasFormatInfo format_info[] = {
1052 #include "insn-format.def"
1055 #undef F0
1056 #undef F1
1057 #undef F2
1058 #undef F3
1059 #undef F4
1060 #undef F5
1061 #undef R
1062 #undef M
1063 #undef BD
1064 #undef BXD
1065 #undef BDL
1066 #undef BXDL
1067 #undef I
1068 #undef L
1070 /* Generally, we'll extract operands into these structures, operate upon
1071 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1072 of routines below for more details. */
1073 typedef struct {
1074 bool g_out, g_out2, g_in1, g_in2;
1075 TCGv_i64 out, out2, in1, in2;
1076 TCGv_i64 addr1;
1077 } DisasOps;
1079 /* Instructions can place constraints on their operands, raising specification
1080 exceptions if they are violated. To make this easy to automate, each "in1",
1081 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1082 of the following, or 0. To make this easy to document, we'll put the
1083 SPEC_<name> defines next to <name>. */
1085 #define SPEC_r1_even 1
1086 #define SPEC_r2_even 2
1087 #define SPEC_r3_even 4
1088 #define SPEC_r1_f128 8
1089 #define SPEC_r2_f128 16
1091 /* Return values from translate_one, indicating the state of the TB. */
1093 /* We are not using a goto_tb (for whatever reason), but have updated
1094 the PC (for whatever reason), so there's no need to do it again on
1095 exiting the TB. */
1096 #define DISAS_PC_UPDATED DISAS_TARGET_0
1098 /* We have emitted one or more goto_tb. No fixup required. */
1099 #define DISAS_GOTO_TB DISAS_TARGET_1
1101 /* We have updated the PC and CC values. */
1102 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1104 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1105 updated the PC for the next instruction to be executed. */
1106 #define DISAS_PC_STALE DISAS_TARGET_3
1108 /* We are exiting the TB to the main loop. */
1109 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
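/* These extend the generic DisasJumpType from "exec/translator.h":
   DISAS_NEXT and DISAS_NORETURN are shared with the common translator
   loop, while the DISAS_TARGET_* slots carry the s390x-specific exit
   states defined above. */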
1111 struct DisasInsn {
1112 unsigned opc:16;
1113 DisasFormat fmt:8;
1114 unsigned fac:8;
1115 unsigned spec:8;
1117 const char *name;
1119 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1120 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1121 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1122 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1123 void (*help_cout)(DisasContext *, DisasOps *);
1124 DisasJumpType (*help_op)(DisasContext *, DisasOps *);
1126 uint64_t data;
1129 /* ====================================================================== */
1130 /* Miscellaneous helpers, used by several operations. */
1132 static void help_l2_shift(DisasContext *s, DisasFields *f,
1133 DisasOps *o, int mask)
1135 int b2 = get_field(f, b2);
1136 int d2 = get_field(f, d2);
1138 if (b2 == 0) {
1139 o->in2 = tcg_const_i64(d2 & mask);
1140 } else {
1141 o->in2 = get_address(s, 0, b2, d2);
1142 tcg_gen_andi_i64(o->in2, o->in2, mask);
1146 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1148 if (dest == s->next_pc) {
1149 per_branch(s, true);
1150 return DISAS_NEXT;
1152 if (use_goto_tb(s, dest)) {
1153 update_cc_op(s);
1154 per_breaking_event(s);
1155 tcg_gen_goto_tb(0);
1156 tcg_gen_movi_i64(psw_addr, dest);
1157 tcg_gen_exit_tb((uintptr_t)s->tb);
1158 return DISAS_GOTO_TB;
1159 } else {
1160 tcg_gen_movi_i64(psw_addr, dest);
1161 per_branch(s, false);
1162 return DISAS_PC_UPDATED;
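/* The low bits of the exit_tb argument name the goto_tb slot that was
   taken, letting the main loop patch that jump once the destination TB
   exists; a bare exit_tb(0) means returning with no chaining at all. */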
1166 static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
1167 bool is_imm, int imm, TCGv_i64 cdest)
1169 DisasJumpType ret;
1170 uint64_t dest = s->pc + 2 * imm;
1171 TCGLabel *lab;
1173 /* Take care of the special cases first. */
1174 if (c->cond == TCG_COND_NEVER) {
1175 ret = DISAS_NEXT;
1176 goto egress;
1178 if (is_imm) {
1179 if (dest == s->next_pc) {
1180 /* Branch to next. */
1181 per_branch(s, true);
1182 ret = DISAS_NEXT;
1183 goto egress;
1185 if (c->cond == TCG_COND_ALWAYS) {
1186 ret = help_goto_direct(s, dest);
1187 goto egress;
1189 } else {
1190 if (!cdest) {
1191 /* E.g. bcr %r0 -> no branch. */
1192 ret = DISAS_NEXT;
1193 goto egress;
1195 if (c->cond == TCG_COND_ALWAYS) {
1196 tcg_gen_mov_i64(psw_addr, cdest);
1197 per_branch(s, false);
1198 ret = DISAS_PC_UPDATED;
1199 goto egress;
1203 if (use_goto_tb(s, s->next_pc)) {
1204 if (is_imm && use_goto_tb(s, dest)) {
1205 /* Both exits can use goto_tb. */
1206 update_cc_op(s);
1208 lab = gen_new_label();
1209 if (c->is_64) {
1210 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1211 } else {
1212 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1215 /* Branch not taken. */
1216 tcg_gen_goto_tb(0);
1217 tcg_gen_movi_i64(psw_addr, s->next_pc);
1218 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1220 /* Branch taken. */
1221 gen_set_label(lab);
1222 per_breaking_event(s);
1223 tcg_gen_goto_tb(1);
1224 tcg_gen_movi_i64(psw_addr, dest);
1225 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1227 ret = DISAS_GOTO_TB;
1228 } else {
1229 /* Fallthru can use goto_tb, but taken branch cannot. */
1230 /* Store taken branch destination before the brcond. This
1231 avoids having to allocate a new local temp to hold it.
1232 We'll overwrite this in the not taken case anyway. */
1233 if (!is_imm) {
1234 tcg_gen_mov_i64(psw_addr, cdest);
1237 lab = gen_new_label();
1238 if (c->is_64) {
1239 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1240 } else {
1241 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1244 /* Branch not taken. */
1245 update_cc_op(s);
1246 tcg_gen_goto_tb(0);
1247 tcg_gen_movi_i64(psw_addr, s->next_pc);
1248 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1250 gen_set_label(lab);
1251 if (is_imm) {
1252 tcg_gen_movi_i64(psw_addr, dest);
1254 per_breaking_event(s);
1255 ret = DISAS_PC_UPDATED;
1257 } else {
1258 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1259 Most commonly we're single-stepping or some other condition that
1260 disables all use of goto_tb. Just update the PC and exit. */
1262 TCGv_i64 next = tcg_const_i64(s->next_pc);
1263 if (is_imm) {
1264 cdest = tcg_const_i64(dest);
1267 if (c->is_64) {
1268 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1269 cdest, next);
1270 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1271 } else {
1272 TCGv_i32 t0 = tcg_temp_new_i32();
1273 TCGv_i64 t1 = tcg_temp_new_i64();
1274 TCGv_i64 z = tcg_const_i64(0);
1275 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1276 tcg_gen_extu_i32_i64(t1, t0);
1277 tcg_temp_free_i32(t0);
1278 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1279 per_branch_cond(s, TCG_COND_NE, t1, z);
1280 tcg_temp_free_i64(t1);
1281 tcg_temp_free_i64(z);
1284 if (is_imm) {
1285 tcg_temp_free_i64(cdest);
1287 tcg_temp_free_i64(next);
1289 ret = DISAS_PC_UPDATED;
1292 egress:
1293 free_compare(c);
1294 return ret;
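/* To summarize the three shapes above: (a) both exits chain via
   goto_tb, (b) only the fallthrough chains and the taken branch takes
   an indirect exit, or (c) no chaining at all, in which case a movcond
   selects the next PC and the TB simply exits. */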
1297 /* ====================================================================== */
1298 /* The operations. These perform the bulk of the work for any insn,
1299 usually after the operands have been loaded and output initialized. */
1301 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1303 TCGv_i64 z, n;
1304 z = tcg_const_i64(0);
1305 n = tcg_temp_new_i64();
1306 tcg_gen_neg_i64(n, o->in2);
1307 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1308 tcg_temp_free_i64(n);
1309 tcg_temp_free_i64(z);
1310 return DISAS_NEXT;
1313 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1315 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1316 return DISAS_NEXT;
1319 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1321 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1322 return DISAS_NEXT;
1325 static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
1327 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1328 tcg_gen_mov_i64(o->out2, o->in2);
1329 return DISAS_NEXT;
1332 static DisasJumpType op_add(DisasContext *s, DisasOps *o)
1334 tcg_gen_add_i64(o->out, o->in1, o->in2);
1335 return DISAS_NEXT;
1338 static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
1340 DisasCompare cmp;
1341 TCGv_i64 carry;
1343 tcg_gen_add_i64(o->out, o->in1, o->in2);
1345 /* The carry flag is the msb of CC, therefore the branch mask that would
1346 create that comparison is 3. Feeding the generated comparison to
1347 setcond produces the carry flag that we desire. */
1348 disas_jcc(s, &cmp, 3);
1349 carry = tcg_temp_new_i64();
1350 if (cmp.is_64) {
1351 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1352 } else {
1353 TCGv_i32 t = tcg_temp_new_i32();
1354 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1355 tcg_gen_extu_i32_i64(carry, t);
1356 tcg_temp_free_i32(t);
1358 free_compare(&cmp);
1360 tcg_gen_add_i64(o->out, o->out, carry);
1361 tcg_temp_free_i64(carry);
1362 return DISAS_NEXT;
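/* Branch mask 3 selects CC values 2 and 3, which for the CC_OP_ADDU_*
   ops are exactly the carry-out cases, so the setcond above recovers
   the carry bit without materializing the full condition code. */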
1365 static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
1367 o->in1 = tcg_temp_new_i64();
1369 if (!s390_has_feat(S390_FEAT_STFLE_45)) {
1370 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1371 } else {
1372 /* Perform the atomic addition in memory. */
1373 tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1374 s->insn->data);
1377 /* Recompute also for atomic case: needed for setting CC. */
1378 tcg_gen_add_i64(o->out, o->in1, o->in2);
1380 if (!s390_has_feat(S390_FEAT_STFLE_45)) {
1381 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1383 return DISAS_NEXT;
1386 static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
1388 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1389 return DISAS_NEXT;
1392 static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
1394 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1395 return DISAS_NEXT;
1398 static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
1400 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1401 return_low128(o->out2);
1402 return DISAS_NEXT;
1405 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1407 tcg_gen_and_i64(o->out, o->in1, o->in2);
1408 return DISAS_NEXT;
1411 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1413 int shift = s->insn->data & 0xff;
1414 int size = s->insn->data >> 8;
1415 uint64_t mask = ((1ull << size) - 1) << shift;
1417 assert(!o->g_in2);
1418 tcg_gen_shli_i64(o->in2, o->in2, shift);
1419 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1420 tcg_gen_and_i64(o->out, o->in1, o->in2);
1422 /* Produce the CC from only the bits manipulated. */
1423 tcg_gen_andi_i64(cc_dst, o->out, mask);
1424 set_cc_nz_u64(s, cc_dst);
1425 return DISAS_NEXT;
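/* s->insn->data packs (size << 8) | shift; e.g. for NIHH the 16-bit
   immediate targets the most significant halfword, so size is 16 and
   shift is 48.  Or-ing ~mask into in2 makes the AND transparent
   everywhere outside the targeted field. */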
1428 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1430 o->in1 = tcg_temp_new_i64();
1432 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1433 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1434 } else {
1435 /* Perform the atomic operation in memory. */
1436 tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1437 s->insn->data);
1440 /* Recompute also for atomic case: needed for setting CC. */
1441 tcg_gen_and_i64(o->out, o->in1, o->in2);
1443 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1444 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1446 return DISAS_NEXT;
1449 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1451 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1452 if (o->in2) {
1453 tcg_gen_mov_i64(psw_addr, o->in2);
1454 per_branch(s, false);
1455 return DISAS_PC_UPDATED;
1456 } else {
1457 return DISAS_NEXT;
1461 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1463 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1464 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1467 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1469 int m1 = get_field(s->fields, m1);
1470 bool is_imm = have_field(s->fields, i2);
1471 int imm = is_imm ? get_field(s->fields, i2) : 0;
1472 DisasCompare c;
1474 /* BCR with R2 = 0 causes no branching */
1475 if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1476 if (m1 == 14) {
1477 /* Perform serialization */
1478 /* FIXME: check for fast-BCR-serialization facility */
1479 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1481 if (m1 == 15) {
1482 /* Perform serialization */
1483 /* FIXME: perform checkpoint-synchronisation */
1484 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1486 return DISAS_NEXT;
1489 disas_jcc(s, &c, m1);
1490 return help_branch(s, &c, is_imm, imm, o->in2);
1493 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1495 int r1 = get_field(s->fields, r1);
1496 bool is_imm = have_field(s->fields, i2);
1497 int imm = is_imm ? get_field(s->fields, i2) : 0;
1498 DisasCompare c;
1499 TCGv_i64 t;
1501 c.cond = TCG_COND_NE;
1502 c.is_64 = false;
1503 c.g1 = false;
1504 c.g2 = false;
1506 t = tcg_temp_new_i64();
1507 tcg_gen_subi_i64(t, regs[r1], 1);
1508 store_reg32_i64(r1, t);
1509 c.u.s32.a = tcg_temp_new_i32();
1510 c.u.s32.b = tcg_const_i32(0);
1511 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1512 tcg_temp_free_i64(t);
1514 return help_branch(s, &c, is_imm, imm, o->in2);
1517 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1519 int r1 = get_field(s->fields, r1);
1520 int imm = get_field(s->fields, i2);
1521 DisasCompare c;
1522 TCGv_i64 t;
1524 c.cond = TCG_COND_NE;
1525 c.is_64 = false;
1526 c.g1 = false;
1527 c.g2 = false;
1529 t = tcg_temp_new_i64();
1530 tcg_gen_shri_i64(t, regs[r1], 32);
1531 tcg_gen_subi_i64(t, t, 1);
1532 store_reg32h_i64(r1, t);
1533 c.u.s32.a = tcg_temp_new_i32();
1534 c.u.s32.b = tcg_const_i32(0);
1535 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1536 tcg_temp_free_i64(t);
1538 return help_branch(s, &c, 1, imm, o->in2);
1541 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1543 int r1 = get_field(s->fields, r1);
1544 bool is_imm = have_field(s->fields, i2);
1545 int imm = is_imm ? get_field(s->fields, i2) : 0;
1546 DisasCompare c;
1548 c.cond = TCG_COND_NE;
1549 c.is_64 = true;
1550 c.g1 = true;
1551 c.g2 = false;
1553 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1554 c.u.s64.a = regs[r1];
1555 c.u.s64.b = tcg_const_i64(0);
1557 return help_branch(s, &c, is_imm, imm, o->in2);
1560 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1562 int r1 = get_field(s->fields, r1);
1563 int r3 = get_field(s->fields, r3);
1564 bool is_imm = have_field(s->fields, i2);
1565 int imm = is_imm ? get_field(s->fields, i2) : 0;
1566 DisasCompare c;
1567 TCGv_i64 t;
1569 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1570 c.is_64 = false;
1571 c.g1 = false;
1572 c.g2 = false;
1574 t = tcg_temp_new_i64();
1575 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1576 c.u.s32.a = tcg_temp_new_i32();
1577 c.u.s32.b = tcg_temp_new_i32();
1578 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1579 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1580 store_reg32_i64(r1, t);
1581 tcg_temp_free_i64(t);
1583 return help_branch(s, &c, is_imm, imm, o->in2);
1586 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1588 int r1 = get_field(s->fields, r1);
1589 int r3 = get_field(s->fields, r3);
1590 bool is_imm = have_field(s->fields, i2);
1591 int imm = is_imm ? get_field(s->fields, i2) : 0;
1592 DisasCompare c;
1594 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1595 c.is_64 = true;
1597 if (r1 == (r3 | 1)) {
1598 c.u.s64.b = load_reg(r3 | 1);
1599 c.g2 = false;
1600 } else {
1601 c.u.s64.b = regs[r3 | 1];
1602 c.g2 = true;
1605 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1606 c.u.s64.a = regs[r1];
1607 c.g1 = true;
1609 return help_branch(s, &c, is_imm, imm, o->in2);
1612 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1614 int imm, m3 = get_field(s->fields, m3);
1615 bool is_imm;
1616 DisasCompare c;
1618 c.cond = ltgt_cond[m3];
1619 if (s->insn->data) {
1620 c.cond = tcg_unsigned_cond(c.cond);
1622 c.is_64 = c.g1 = c.g2 = true;
1623 c.u.s64.a = o->in1;
1624 c.u.s64.b = o->in2;
1626 is_imm = have_field(s->fields, i4);
1627 if (is_imm) {
1628 imm = get_field(s->fields, i4);
1629 } else {
1630 imm = 0;
1631 o->out = get_address(s, 0, get_field(s->fields, b4),
1632 get_field(s->fields, d4));
1635 return help_branch(s, &c, is_imm, imm, o->out);
1638 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1640 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1641 set_cc_static(s);
1642 return DISAS_NEXT;
1645 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1647 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1648 set_cc_static(s);
1649 return DISAS_NEXT;
1652 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1654 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1655 set_cc_static(s);
1656 return DISAS_NEXT;
1659 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1661 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1662 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1663 tcg_temp_free_i32(m3);
1664 gen_set_cc_nz_f32(s, o->in2);
1665 return DISAS_NEXT;
1668 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1670 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1671 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1672 tcg_temp_free_i32(m3);
1673 gen_set_cc_nz_f64(s, o->in2);
1674 return DISAS_NEXT;
1677 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1679 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1680 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1681 tcg_temp_free_i32(m3);
1682 gen_set_cc_nz_f128(s, o->in1, o->in2);
1683 return DISAS_NEXT;
1686 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1688 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1689 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1690 tcg_temp_free_i32(m3);
1691 gen_set_cc_nz_f32(s, o->in2);
1692 return DISAS_NEXT;
1695 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1697 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1698 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1699 tcg_temp_free_i32(m3);
1700 gen_set_cc_nz_f64(s, o->in2);
1701 return DISAS_NEXT;
1704 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1706 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1707 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1708 tcg_temp_free_i32(m3);
1709 gen_set_cc_nz_f128(s, o->in1, o->in2);
1710 return DISAS_NEXT;
1713 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1715 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1716 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1717 tcg_temp_free_i32(m3);
1718 gen_set_cc_nz_f32(s, o->in2);
1719 return DISAS_NEXT;
1722 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1724 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1725 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1726 tcg_temp_free_i32(m3);
1727 gen_set_cc_nz_f64(s, o->in2);
1728 return DISAS_NEXT;
1731 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1733 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1734 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1735 tcg_temp_free_i32(m3);
1736 gen_set_cc_nz_f128(s, o->in1, o->in2);
1737 return DISAS_NEXT;
1740 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1742 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1743 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1744 tcg_temp_free_i32(m3);
1745 gen_set_cc_nz_f32(s, o->in2);
1746 return DISAS_NEXT;
1749 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1751 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1752 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1753 tcg_temp_free_i32(m3);
1754 gen_set_cc_nz_f64(s, o->in2);
1755 return DISAS_NEXT;
1758 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1760 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1761 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1762 tcg_temp_free_i32(m3);
1763 gen_set_cc_nz_f128(s, o->in1, o->in2);
1764 return DISAS_NEXT;
1767 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1769 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1770 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1771 tcg_temp_free_i32(m3);
1772 return DISAS_NEXT;
1775 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1777 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1778 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1779 tcg_temp_free_i32(m3);
1780 return DISAS_NEXT;
1783 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1785 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1786 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1787 tcg_temp_free_i32(m3);
1788 return_low128(o->out2);
1789 return DISAS_NEXT;
1792 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1794 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1795 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1796 tcg_temp_free_i32(m3);
1797 return DISAS_NEXT;
1800 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1802 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1803 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1804 tcg_temp_free_i32(m3);
1805 return DISAS_NEXT;
1808 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1810 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1811 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1812 tcg_temp_free_i32(m3);
1813 return_low128(o->out2);
1814 return DISAS_NEXT;
1817 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1819 int r2 = get_field(s->fields, r2);
1820 TCGv_i64 len = tcg_temp_new_i64();
1822 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1823 set_cc_static(s);
1824 return_low128(o->out);
1826 tcg_gen_add_i64(regs[r2], regs[r2], len);
1827 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1828 tcg_temp_free_i64(len);
1830 return DISAS_NEXT;
1833 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
1835 int l = get_field(s->fields, l1);
1836 TCGv_i32 vl;
1838 switch (l + 1) {
1839 case 1:
1840 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1841 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1842 break;
1843 case 2:
1844 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1845 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1846 break;
1847 case 4:
1848 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1849 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1850 break;
1851 case 8:
1852 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1853 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1854 break;
1855 default:
1856 vl = tcg_const_i32(l);
1857 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1858 tcg_temp_free_i32(vl);
1859 set_cc_static(s);
1860 return DISAS_NEXT;
1862 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1863 return DISAS_NEXT;
1866 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
1868 int r1 = get_field(s->fields, r1);
1869 int r2 = get_field(s->fields, r2);
1870 TCGv_i32 t1, t2;
1872 /* r1 and r2 must be even. */
1873 if (r1 & 1 || r2 & 1) {
1874 gen_program_exception(s, PGM_SPECIFICATION);
1875 return DISAS_NORETURN;
1878 t1 = tcg_const_i32(r1);
1879 t2 = tcg_const_i32(r2);
1880 gen_helper_clcl(cc_op, cpu_env, t1, t2);
1881 tcg_temp_free_i32(t1);
1882 tcg_temp_free_i32(t2);
1883 set_cc_static(s);
1884 return DISAS_NEXT;
1887 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
1889 int r1 = get_field(s->fields, r1);
1890 int r3 = get_field(s->fields, r3);
1891 TCGv_i32 t1, t3;
1893 /* r1 and r3 must be even. */
1894 if (r1 & 1 || r3 & 1) {
1895 gen_program_exception(s, PGM_SPECIFICATION);
1896 return DISAS_NORETURN;
1899 t1 = tcg_const_i32(r1);
1900 t3 = tcg_const_i32(r3);
1901 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1902 tcg_temp_free_i32(t1);
1903 tcg_temp_free_i32(t3);
1904 set_cc_static(s);
1905 return DISAS_NEXT;
1908 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
1910 int r1 = get_field(s->fields, r1);
1911 int r3 = get_field(s->fields, r3);
1912 TCGv_i32 t1, t3;
1914 /* r1 and r3 must be even. */
1915 if (r1 & 1 || r3 & 1) {
1916 gen_program_exception(s, PGM_SPECIFICATION);
1917 return DISAS_NORETURN;
1920 t1 = tcg_const_i32(r1);
1921 t3 = tcg_const_i32(r3);
1922 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
1923 tcg_temp_free_i32(t1);
1924 tcg_temp_free_i32(t3);
1925 set_cc_static(s);
1926 return DISAS_NEXT;
1929 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
1931 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1932 TCGv_i32 t1 = tcg_temp_new_i32();
1933 tcg_gen_extrl_i64_i32(t1, o->in1);
1934 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1935 set_cc_static(s);
1936 tcg_temp_free_i32(t1);
1937 tcg_temp_free_i32(m3);
1938 return DISAS_NEXT;
1941 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
1943 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1944 set_cc_static(s);
1945 return_low128(o->in2);
1946 return DISAS_NEXT;
1949 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
1951 TCGv_i64 t = tcg_temp_new_i64();
1952 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1953 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1954 tcg_gen_or_i64(o->out, o->out, t);
1955 tcg_temp_free_i64(t);
1956 return DISAS_NEXT;
1959 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
1961 int d2 = get_field(s->fields, d2);
1962 int b2 = get_field(s->fields, b2);
1963 TCGv_i64 addr, cc;
1965 /* Note that in1 = R3 (new value) and
1966 in2 = (zero-extended) R1 (expected value). */
1968 addr = get_address(s, 0, b2, d2);
1969 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
1970 get_mem_index(s), s->insn->data | MO_ALIGN);
1971 tcg_temp_free_i64(addr);
1973 /* Are the memory and expected values (un)equal? Note that this setcond
1974 produces the output CC value, thus the NE sense of the test. */
1975 cc = tcg_temp_new_i64();
1976 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1977 tcg_gen_extrl_i64_i32(cc_op, cc);
1978 tcg_temp_free_i64(cc);
1979 set_cc_static(s);
1981 return DISAS_NEXT;
1984 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
1986 int r1 = get_field(s->fields, r1);
1987 int r3 = get_field(s->fields, r3);
1988 int d2 = get_field(s->fields, d2);
1989 int b2 = get_field(s->fields, b2);
1990 TCGv_i64 addr;
1991 TCGv_i32 t_r1, t_r3;
1993 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1994 addr = get_address(s, 0, b2, d2);
1995 t_r1 = tcg_const_i32(r1);
1996 t_r3 = tcg_const_i32(r3);
1997 if (tb_cflags(s->tb) & CF_PARALLEL) {
1998 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
1999 } else {
2000 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2002 tcg_temp_free_i64(addr);
2003 tcg_temp_free_i32(t_r1);
2004 tcg_temp_free_i32(t_r3);
2006 set_cc_static(s);
2007 return DISAS_NEXT;
2010 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2012 int r3 = get_field(s->fields, r3);
2013 TCGv_i32 t_r3 = tcg_const_i32(r3);
2015 if (tb_cflags(s->tb) & CF_PARALLEL) {
2016 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
2017 } else {
2018 gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
2020 tcg_temp_free_i32(t_r3);
2022 set_cc_static(s);
2023 return DISAS_NEXT;
2026 #ifndef CONFIG_USER_ONLY
2027 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2029 TCGMemOp mop = s->insn->data;
2030 TCGv_i64 addr, old, cc;
2031 TCGLabel *lab = gen_new_label();
2033 /* Note that in1 = R1 (zero-extended expected value),
2034 out = R1 (original reg), out2 = R1+1 (new value). */
2036 check_privileged(s);
2037 addr = tcg_temp_new_i64();
2038 old = tcg_temp_new_i64();
2039 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2040 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2041 get_mem_index(s), mop | MO_ALIGN);
2042 tcg_temp_free_i64(addr);
2044 /* Are the memory and expected values (un)equal? */
2045 cc = tcg_temp_new_i64();
2046 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2047 tcg_gen_extrl_i64_i32(cc_op, cc);
2049 /* Write back the output now, so that it happens before the
2050 following branch, so that we don't need local temps. */
2051 if ((mop & MO_SIZE) == MO_32) {
2052 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2053 } else {
2054 tcg_gen_mov_i64(o->out, old);
2056 tcg_temp_free_i64(old);
2058 /* If the comparison was equal, and the LSB of R2 was set,
2059 then we need to flush the TLB (for all cpus). */
2060 tcg_gen_xori_i64(cc, cc, 1);
2061 tcg_gen_and_i64(cc, cc, o->in2);
2062 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2063 tcg_temp_free_i64(cc);
2065 gen_helper_purge(cpu_env);
2066 gen_set_label(lab);
2068 return DISAS_NEXT;
2070 #endif
2072 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2074 TCGv_i64 t1 = tcg_temp_new_i64();
2075 TCGv_i32 t2 = tcg_temp_new_i32();
2076 tcg_gen_extrl_i64_i32(t2, o->in1);
2077 gen_helper_cvd(t1, t2);
2078 tcg_temp_free_i32(t2);
2079 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2080 tcg_temp_free_i64(t1);
2081 return DISAS_NEXT;
2084 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2086 int m3 = get_field(s->fields, m3);
2087 TCGLabel *lab = gen_new_label();
2088 TCGCond c;
2090 c = tcg_invert_cond(ltgt_cond[m3]);
2091 if (s->insn->data) {
2092 c = tcg_unsigned_cond(c);
2094 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2096 /* Trap. */
2097 gen_trap(s);
2099 gen_set_label(lab);
2100 return DISAS_NEXT;
2103 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2105 int m3 = get_field(s->fields, m3);
2106 int r1 = get_field(s->fields, r1);
2107 int r2 = get_field(s->fields, r2);
2108 TCGv_i32 tr1, tr2, chk;
2110 /* R1 and R2 must both be even. */
2111 if ((r1 | r2) & 1) {
2112 gen_program_exception(s, PGM_SPECIFICATION);
2113 return DISAS_NORETURN;
2115 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2116 m3 = 0;
2119 tr1 = tcg_const_i32(r1);
2120 tr2 = tcg_const_i32(r2);
2121 chk = tcg_const_i32(m3);
2123 switch (s->insn->data) {
2124 case 12:
2125 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2126 break;
2127 case 14:
2128 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2129 break;
2130 case 21:
2131 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2132 break;
2133 case 24:
2134 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2135 break;
2136 case 41:
2137 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2138 break;
2139 case 42:
2140 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2141 break;
2142 default:
2143 g_assert_not_reached();
2146 tcg_temp_free_i32(tr1);
2147 tcg_temp_free_i32(tr2);
2148 tcg_temp_free_i32(chk);
2149 set_cc_static(s);
2150 return DISAS_NEXT;
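/* s->insn->data names the conversion as a digit pair, where 1 stands
   for UTF-8, 2 for UTF-16 and 4 for UTF-32: e.g. 12 is CU12 (UTF-8 to
   UTF-16) and 41 is CU41 (UTF-32 to UTF-8). */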
2153 #ifndef CONFIG_USER_ONLY
2154 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2156 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2157 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2158 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2160 check_privileged(s);
2161 gen_helper_diag(cpu_env, r1, r3, func_code);
2163 tcg_temp_free_i32(func_code);
2164 tcg_temp_free_i32(r3);
2165 tcg_temp_free_i32(r1);
2166 return DISAS_NEXT;
2168 #endif
2170 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2172 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2173 return_low128(o->out);
2174 return DISAS_NEXT;
2177 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2179 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2180 return_low128(o->out);
2181 return DISAS_NEXT;
2184 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2186 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2187 return_low128(o->out);
2188 return DISAS_NEXT;
2191 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2193 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2194 return_low128(o->out);
2195 return DISAS_NEXT;
2198 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2200 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2201 return DISAS_NEXT;
2204 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2206 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2207 return DISAS_NEXT;
2210 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2212 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2213 return_low128(o->out2);
2214 return DISAS_NEXT;
2217 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2219 int r2 = get_field(s->fields, r2);
2220 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2221 return DISAS_NEXT;
2224 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2226 /* No cache information provided. */
2227 tcg_gen_movi_i64(o->out, -1);
2228 return DISAS_NEXT;
2231 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2233 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2234 return DISAS_NEXT;
2237 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2239 int r1 = get_field(s->fields, r1);
2240 int r2 = get_field(s->fields, r2);
2241 TCGv_i64 t = tcg_temp_new_i64();
2243 /* Note the "subsequently" in the PoO, which implies a defined result
2244 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2245 tcg_gen_shri_i64(t, psw_mask, 32);
2246 store_reg32_i64(r1, t);
2247 if (r2 != 0) {
2248 store_reg32_i64(r2, psw_mask);
2251 tcg_temp_free_i64(t);
2252 return DISAS_NEXT;
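/* EXECUTE: the helper fetches the target instruction, ORs bits 56-63
   of R1 into its second byte (unless R1 is 0), and stashes the result
   in env->ex_value for the translator to pick up on the next step. */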
2255 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2257 int r1 = get_field(s->fields, r1);
2258 TCGv_i32 ilen;
2259 TCGv_i64 v1;
2261 /* Nested EXECUTE is not allowed. */
2262 if (unlikely(s->ex_value)) {
2263 gen_program_exception(s, PGM_EXECUTE);
2264 return DISAS_NORETURN;
2267 update_psw_addr(s);
2268 update_cc_op(s);
2270 if (r1 == 0) {
2271 v1 = tcg_const_i64(0);
2272 } else {
2273 v1 = regs[r1];
2276 ilen = tcg_const_i32(s->ilen);
2277 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2278 tcg_temp_free_i32(ilen);
2280 if (r1 == 0) {
2281 tcg_temp_free_i64(v1);
2284 return DISAS_PC_CC_UPDATED;
2287 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2289 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2290 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2291 tcg_temp_free_i32(m3);
2292 return DISAS_NEXT;
2295 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2297 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2298 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2299 tcg_temp_free_i32(m3);
2300 return DISAS_NEXT;
2303 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2305 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2306 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2307 return_low128(o->out2);
2308 tcg_temp_free_i32(m3);
2309 return DISAS_NEXT;
2312 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2314 /* We'll use the original input for cc computation, since we get to
2315 compare that against 0, which ought to be better than comparing
2316 the real output against 64. It also lets cc_dst be a convenient
2317 temporary during our computation. */
2318 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2320 /* R1 = IN ? CLZ(IN) : 64. */
2321 tcg_gen_clzi_i64(o->out, o->in2, 64);
2323 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2324 value by 64, which is undefined. But since the shift is 64 iff the
2325 input is zero, we still get the correct result after and'ing. */
2326 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2327 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2328 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2329 return DISAS_NEXT;
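/* INSERT CHARACTERS UNDER MASK: M3 selects which bytes of the R1
   field receive successive bytes from memory. Contiguous masks
   collapse to a single wide load; insn->data gives the base bit
   position of the field (ICM vs ICMH). */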
2332 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2334 int m3 = get_field(s->fields, m3);
2335 int pos, len, base = s->insn->data;
2336 TCGv_i64 tmp = tcg_temp_new_i64();
2337 uint64_t ccm;
2339 switch (m3) {
2340 case 0xf:
2341 /* Effectively a 32-bit load. */
2342 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2343 len = 32;
2344 goto one_insert;
2346 case 0xc:
2347 case 0x6:
2348 case 0x3:
2349 /* Effectively a 16-bit load. */
2350 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2351 len = 16;
2352 goto one_insert;
2354 case 0x8:
2355 case 0x4:
2356 case 0x2:
2357 case 0x1:
2358 /* Effectively an 8-bit load. */
2359 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2360 len = 8;
2361 goto one_insert;
2363 one_insert:
2364 pos = base + ctz32(m3) * 8;
2365 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2366 ccm = ((1ull << len) - 1) << pos;
2367 break;
2369 default:
2370 /* This is going to be a sequence of loads and inserts. */
2371 pos = base + 32 - 8;
2372 ccm = 0;
2373 while (m3) {
2374 if (m3 & 0x8) {
2375 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2376 tcg_gen_addi_i64(o->in2, o->in2, 1);
2377 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2378 ccm |= 0xff << pos;
2380 m3 = (m3 << 1) & 0xf;
2381 pos -= 8;
2383 break;
2386 tcg_gen_movi_i64(tmp, ccm);
2387 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2388 tcg_temp_free_i64(tmp);
2389 return DISAS_NEXT;
2392 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2394 int shift = s->insn->data & 0xff;
2395 int size = s->insn->data >> 8;
2396 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2397 return DISAS_NEXT;
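/* INSERT PROGRAM MASK: clear bits 32-39 of R1, then deposit the
   condition code into bits 34-35 and the program mask into bits 36-39. */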
2400 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2402 TCGv_i64 t1;
2404 gen_op_calc_cc(s);
2405 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2407 t1 = tcg_temp_new_i64();
2408 tcg_gen_shli_i64(t1, psw_mask, 20);
2409 tcg_gen_shri_i64(t1, t1, 36);
2410 tcg_gen_or_i64(o->out, o->out, t1);
2412 tcg_gen_extu_i32_i64(t1, cc_op);
2413 tcg_gen_shli_i64(t1, t1, 28);
2414 tcg_gen_or_i64(o->out, o->out, t1);
2415 tcg_temp_free_i64(t1);
2416 return DISAS_NEXT;
2419 #ifndef CONFIG_USER_ONLY
2420 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2422 TCGv_i32 m4;
2424 check_privileged(s);
2425 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2426 m4 = tcg_const_i32(get_field(s->fields, m4));
2427 } else {
2428 m4 = tcg_const_i32(0);
2430 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2431 tcg_temp_free_i32(m4);
2432 return DISAS_NEXT;
2435 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2437 TCGv_i32 m4;
2439 check_privileged(s);
2440 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2441 m4 = tcg_const_i32(get_field(s->fields, m4));
2442 } else {
2443 m4 = tcg_const_i32(0);
2445 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2446 tcg_temp_free_i32(m4);
2447 return DISAS_NEXT;
2450 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2452 check_privileged(s);
2453 gen_helper_iske(o->out, cpu_env, o->in2);
2454 return DISAS_NEXT;
2456 #endif
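/* Common handler for the message-security-assist instructions
   (KM, KMC, KMAC, KMCTR, ...); the function type in insn->data
   determines which of r1/r2/r3 must be nonzero even-numbered
   registers before the helper is invoked. */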
2458 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2460 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2461 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2462 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2463 TCGv_i32 t_r1, t_r2, t_r3, type;
2465 switch (s->insn->data) {
2466 case S390_FEAT_TYPE_KMCTR:
2467 if (r3 & 1 || !r3) {
2468 gen_program_exception(s, PGM_SPECIFICATION);
2469 return DISAS_NORETURN;
2471 /* FALL THROUGH */
2472 case S390_FEAT_TYPE_PPNO:
2473 case S390_FEAT_TYPE_KMF:
2474 case S390_FEAT_TYPE_KMC:
2475 case S390_FEAT_TYPE_KMO:
2476 case S390_FEAT_TYPE_KM:
2477 if (r1 & 1 || !r1) {
2478 gen_program_exception(s, PGM_SPECIFICATION);
2479 return DISAS_NORETURN;
2481 /* FALL THROUGH */
2482 case S390_FEAT_TYPE_KMAC:
2483 case S390_FEAT_TYPE_KIMD:
2484 case S390_FEAT_TYPE_KLMD:
2485 if (r2 & 1 || !r2) {
2486 gen_program_exception(s, PGM_SPECIFICATION);
2487 return DISAS_NORETURN;
2489 /* FALL THROUGH */
2490 case S390_FEAT_TYPE_PCKMO:
2491 case S390_FEAT_TYPE_PCC:
2492 break;
2493 default:
2494 g_assert_not_reached();
2497 t_r1 = tcg_const_i32(r1);
2498 t_r2 = tcg_const_i32(r2);
2499 t_r3 = tcg_const_i32(r3);
2500 type = tcg_const_i32(s->insn->data);
2501 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2502 set_cc_static(s);
2503 tcg_temp_free_i32(t_r1);
2504 tcg_temp_free_i32(t_r2);
2505 tcg_temp_free_i32(t_r3);
2506 tcg_temp_free_i32(type);
2507 return DISAS_NEXT;
2510 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2512 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2513 set_cc_static(s);
2514 return DISAS_NEXT;
2517 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2519 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2520 set_cc_static(s);
2521 return DISAS_NEXT;
2524 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2526 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2527 set_cc_static(s);
2528 return DISAS_NEXT;
2531 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2533 /* The real output is indeed the original value in memory. */
2535 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2536 s->insn->data | MO_ALIGN);
2537 /* However, we need to recompute the addition for setting CC. */
2538 tcg_gen_add_i64(o->out, o->in1, o->in2);
2539 return DISAS_NEXT;
2542 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2544 /* The real output is indeed the original value in memory. */
2546 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2547 s->insn->data | MO_ALIGN);
2548 /* However, we need to recompute the operation for setting CC. */
2549 tcg_gen_and_i64(o->out, o->in1, o->in2);
2550 return DISAS_NEXT;
2553 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2555 /* The real output is indeed the original value in memory. */
2557 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2558 s->insn->data | MO_ALIGN);
2559 /* However, we need to recompute the operation for setting CC. */
2560 tcg_gen_or_i64(o->out, o->in1, o->in2);
2561 return DISAS_NEXT;
2564 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2566 /* The real output is indeed the original value in memory. */
2568 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2569 s->insn->data | MO_ALIGN);
2570 /* However, we need to recompute the operation for setting CC. */
2571 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2572 return DISAS_NEXT;
2575 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2577 gen_helper_ldeb(o->out, cpu_env, o->in2);
2578 return DISAS_NEXT;
2581 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2583 gen_helper_ledb(o->out, cpu_env, o->in2);
2584 return DISAS_NEXT;
2587 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2589 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2590 return DISAS_NEXT;
2593 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2595 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2596 return DISAS_NEXT;
2599 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2601 gen_helper_lxdb(o->out, cpu_env, o->in2);
2602 return_low128(o->out2);
2603 return DISAS_NEXT;
2606 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2608 gen_helper_lxeb(o->out, cpu_env, o->in2);
2609 return_low128(o->out2);
2610 return DISAS_NEXT;
2613 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2615 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2616 return DISAS_NEXT;
2619 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2621 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2622 return DISAS_NEXT;
2625 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2627 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2628 return DISAS_NEXT;
2631 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2633 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2634 return DISAS_NEXT;
2637 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2639 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2640 return DISAS_NEXT;
2643 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2645 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2646 return DISAS_NEXT;
2649 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2651 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2652 return DISAS_NEXT;
2655 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2657 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2658 return DISAS_NEXT;
2661 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2663 TCGLabel *lab = gen_new_label();
2664 store_reg32_i64(get_field(s->fields, r1), o->in2);
2665 /* The value is stored even in case of trap. */
2666 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2667 gen_trap(s);
2668 gen_set_label(lab);
2669 return DISAS_NEXT;
2672 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2674 TCGLabel *lab = gen_new_label();
2675 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2676 /* The value is stored even in case of trap. */
2677 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2678 gen_trap(s);
2679 gen_set_label(lab);
2680 return DISAS_NEXT;
2683 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2685 TCGLabel *lab = gen_new_label();
2686 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2687 /* The value is stored even in case of trap. */
2688 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2689 gen_trap(s);
2690 gen_set_label(lab);
2691 return DISAS_NEXT;
2694 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2696 TCGLabel *lab = gen_new_label();
2697 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2698 /* The value is stored even in case of trap. */
2699 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2700 gen_trap(s);
2701 gen_set_label(lab);
2702 return DISAS_NEXT;
2705 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2707 TCGLabel *lab = gen_new_label();
2708 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2709 /* The value is stored even in case of trap. */
2710 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2711 gen_trap(s);
2712 gen_set_label(lab);
2713 return DISAS_NEXT;
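/* LOAD ON CONDITION: a movcond on the CC test decoded by disas_jcc;
   the 32-bit compare path widens the setcond result to 64 bits first. */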
2716 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2718 DisasCompare c;
2720 disas_jcc(s, &c, get_field(s->fields, m3));
2722 if (c.is_64) {
2723 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2724 o->in2, o->in1);
2725 free_compare(&c);
2726 } else {
2727 TCGv_i32 t32 = tcg_temp_new_i32();
2728 TCGv_i64 t, z;
2730 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2731 free_compare(&c);
2733 t = tcg_temp_new_i64();
2734 tcg_gen_extu_i32_i64(t, t32);
2735 tcg_temp_free_i32(t32);
2737 z = tcg_const_i64(0);
2738 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2739 tcg_temp_free_i64(t);
2740 tcg_temp_free_i64(z);
2743 return DISAS_NEXT;
2746 #ifndef CONFIG_USER_ONLY
2747 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2749 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2750 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2751 check_privileged(s);
2752 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2753 tcg_temp_free_i32(r1);
2754 tcg_temp_free_i32(r3);
2755 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2756 return DISAS_PC_STALE_NOCHAIN;
2759 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2761 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2762 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2763 check_privileged(s);
2764 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2765 tcg_temp_free_i32(r1);
2766 tcg_temp_free_i32(r3);
2767 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2768 return DISAS_PC_STALE_NOCHAIN;
2771 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2773 check_privileged(s);
2774 gen_helper_lra(o->out, cpu_env, o->in2);
2775 set_cc_static(s);
2776 return DISAS_NEXT;
2779 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2781 check_privileged(s);
2783 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2784 return DISAS_NEXT;
2787 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2789 TCGv_i64 t1, t2;
2791 check_privileged(s);
2792 per_breaking_event(s);
2794 t1 = tcg_temp_new_i64();
2795 t2 = tcg_temp_new_i64();
2796 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2797 tcg_gen_addi_i64(o->in2, o->in2, 4);
2798 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2799 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2800 tcg_gen_shli_i64(t1, t1, 32);
2801 gen_helper_load_psw(cpu_env, t1, t2);
2802 tcg_temp_free_i64(t1);
2803 tcg_temp_free_i64(t2);
2804 return DISAS_NORETURN;
2807 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2809 TCGv_i64 t1, t2;
2811 check_privileged(s);
2812 per_breaking_event(s);
2814 t1 = tcg_temp_new_i64();
2815 t2 = tcg_temp_new_i64();
2816 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2817 tcg_gen_addi_i64(o->in2, o->in2, 8);
2818 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2819 gen_helper_load_psw(cpu_env, t1, t2);
2820 tcg_temp_free_i64(t1);
2821 tcg_temp_free_i64(t2);
2822 return DISAS_NORETURN;
2824 #endif
2826 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2828 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2829 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2830 gen_helper_lam(cpu_env, r1, o->in2, r3);
2831 tcg_temp_free_i32(r1);
2832 tcg_temp_free_i32(r3);
2833 return DISAS_NEXT;
2836 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2838 int r1 = get_field(s->fields, r1);
2839 int r3 = get_field(s->fields, r3);
2840 TCGv_i64 t1, t2;
2842 /* Only one register to read. */
2843 t1 = tcg_temp_new_i64();
2844 if (unlikely(r1 == r3)) {
2845 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2846 store_reg32_i64(r1, t1);
2847 tcg_temp_free(t1);
2848 return DISAS_NEXT;
2851 /* First load the values of the first and last registers to trigger
2852 possible page faults. */
2853 t2 = tcg_temp_new_i64();
2854 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2855 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2856 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2857 store_reg32_i64(r1, t1);
2858 store_reg32_i64(r3, t2);
2860 /* Only two registers to read. */
2861 if (((r1 + 1) & 15) == r3) {
2862 tcg_temp_free(t2);
2863 tcg_temp_free(t1);
2864 return DISAS_NEXT;
2867 /* Then load the remaining registers. Page fault can't occur. */
2868 r3 = (r3 - 1) & 15;
2869 tcg_gen_movi_i64(t2, 4);
2870 while (r1 != r3) {
2871 r1 = (r1 + 1) & 15;
2872 tcg_gen_add_i64(o->in2, o->in2, t2);
2873 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2874 store_reg32_i64(r1, t1);
2876 tcg_temp_free(t2);
2877 tcg_temp_free(t1);
2879 return DISAS_NEXT;
2882 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
2884 int r1 = get_field(s->fields, r1);
2885 int r3 = get_field(s->fields, r3);
2886 TCGv_i64 t1, t2;
2888 /* Only one register to read. */
2889 t1 = tcg_temp_new_i64();
2890 if (unlikely(r1 == r3)) {
2891 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2892 store_reg32h_i64(r1, t1);
2893 tcg_temp_free(t1);
2894 return DISAS_NEXT;
2897 /* First load the values of the first and last registers to trigger
2898 possible page faults. */
2899 t2 = tcg_temp_new_i64();
2900 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2901 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2902 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2903 store_reg32h_i64(r1, t1);
2904 store_reg32h_i64(r3, t2);
2906 /* Only two registers to read. */
2907 if (((r1 + 1) & 15) == r3) {
2908 tcg_temp_free(t2);
2909 tcg_temp_free(t1);
2910 return DISAS_NEXT;
2913 /* Then load the remaining registers. Page fault can't occur. */
2914 r3 = (r3 - 1) & 15;
2915 tcg_gen_movi_i64(t2, 4);
2916 while (r1 != r3) {
2917 r1 = (r1 + 1) & 15;
2918 tcg_gen_add_i64(o->in2, o->in2, t2);
2919 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2920 store_reg32h_i64(r1, t1);
2922 tcg_temp_free(t2);
2923 tcg_temp_free(t1);
2925 return DISAS_NEXT;
2928 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
2930 int r1 = get_field(s->fields, r1);
2931 int r3 = get_field(s->fields, r3);
2932 TCGv_i64 t1, t2;
2934 /* Only one register to read. */
2935 if (unlikely(r1 == r3)) {
2936 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2937 return DISAS_NEXT;
2940 /* First load the values of the first and last registers to trigger
2941 possible page faults. */
2942 t1 = tcg_temp_new_i64();
2943 t2 = tcg_temp_new_i64();
2944 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2945 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2946 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2947 tcg_gen_mov_i64(regs[r1], t1);
2948 tcg_temp_free(t2);
2950 /* Only two registers to read. */
2951 if (((r1 + 1) & 15) == r3) {
2952 tcg_temp_free(t1);
2953 return DISAS_NEXT;
2956 /* Then load the remaining registers. Page fault can't occur. */
2957 r3 = (r3 - 1) & 15;
2958 tcg_gen_movi_i64(t1, 8);
2959 while (r1 != r3) {
2960 r1 = (r1 + 1) & 15;
2961 tcg_gen_add_i64(o->in2, o->in2, t1);
2962 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2964 tcg_temp_free(t1);
2966 return DISAS_NEXT;
2969 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
2971 TCGv_i64 a1, a2;
2972 TCGMemOp mop = s->insn->data;
2974 /* In a parallel context, stop the world and single step. */
2975 if (tb_cflags(s->tb) & CF_PARALLEL) {
2976 update_psw_addr(s);
2977 update_cc_op(s);
2978 gen_exception(EXCP_ATOMIC);
2979 return DISAS_NORETURN;
2982 /* In a serial context, perform the two loads ... */
2983 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2984 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2985 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2986 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2987 tcg_temp_free_i64(a1);
2988 tcg_temp_free_i64(a2);
2990 /* ... and indicate that we performed them while interlocked. */
2991 gen_op_movi_cc(s, 0);
2992 return DISAS_NEXT;
2995 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
2997 if (tb_cflags(s->tb) & CF_PARALLEL) {
2998 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
2999 } else {
3000 gen_helper_lpq(o->out, cpu_env, o->in2);
3002 return_low128(o->out2);
3003 return DISAS_NEXT;
3006 #ifndef CONFIG_USER_ONLY
3007 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3009 check_privileged(s);
3010 gen_helper_lura(o->out, cpu_env, o->in2);
3011 return DISAS_NEXT;
3014 static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3016 check_privileged(s);
3017 gen_helper_lurag(o->out, cpu_env, o->in2);
3018 return DISAS_NEXT;
3020 #endif
3022 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3024 tcg_gen_andi_i64(o->out, o->in2, -256);
3025 return DISAS_NEXT;
3028 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3030 o->out = o->in2;
3031 o->g_out = o->g_in2;
3032 o->in2 = NULL;
3033 o->g_in2 = false;
3034 return DISAS_NEXT;
3037 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3039 int b2 = get_field(s->fields, b2);
3040 TCGv ar1 = tcg_temp_new_i64();
3042 o->out = o->in2;
3043 o->g_out = o->g_in2;
3044 o->in2 = NULL;
3045 o->g_in2 = false;
3047 switch (s->tb->flags & FLAG_MASK_ASC) {
3048 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3049 tcg_gen_movi_i64(ar1, 0);
3050 break;
3051 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3052 tcg_gen_movi_i64(ar1, 1);
3053 break;
3054 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3055 if (b2) {
3056 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3057 } else {
3058 tcg_gen_movi_i64(ar1, 0);
3060 break;
3061 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3062 tcg_gen_movi_i64(ar1, 2);
3063 break;
3066 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3067 tcg_temp_free_i64(ar1);
3069 return DISAS_NEXT;
3072 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3074 o->out = o->in1;
3075 o->out2 = o->in2;
3076 o->g_out = o->g_in1;
3077 o->g_out2 = o->g_in2;
3078 o->in1 = NULL;
3079 o->in2 = NULL;
3080 o->g_in1 = o->g_in2 = false;
3081 return DISAS_NEXT;
3084 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3086 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3087 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3088 tcg_temp_free_i32(l);
3089 return DISAS_NEXT;
3092 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3094 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3095 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3096 tcg_temp_free_i32(l);
3097 return DISAS_NEXT;
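/* MOVE LONG: R1 and R2 are even-odd register pairs holding the
   operand addresses and lengths; the helper updates them in place
   and computes the CC. */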
3100 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3102 int r1 = get_field(s->fields, r1);
3103 int r2 = get_field(s->fields, r2);
3104 TCGv_i32 t1, t2;
3106 /* r1 and r2 must be even. */
3107 if (r1 & 1 || r2 & 1) {
3108 gen_program_exception(s, PGM_SPECIFICATION);
3109 return DISAS_NORETURN;
3112 t1 = tcg_const_i32(r1);
3113 t2 = tcg_const_i32(r2);
3114 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3115 tcg_temp_free_i32(t1);
3116 tcg_temp_free_i32(t2);
3117 set_cc_static(s);
3118 return DISAS_NEXT;
3121 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3123 int r1 = get_field(s->fields, r1);
3124 int r3 = get_field(s->fields, r3);
3125 TCGv_i32 t1, t3;
3127 /* r1 and r3 must be even. */
3128 if (r1 & 1 || r3 & 1) {
3129 gen_program_exception(s, PGM_SPECIFICATION);
3130 return DISAS_NORETURN;
3133 t1 = tcg_const_i32(r1);
3134 t3 = tcg_const_i32(r3);
3135 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3136 tcg_temp_free_i32(t1);
3137 tcg_temp_free_i32(t3);
3138 set_cc_static(s);
3139 return DISAS_NEXT;
3142 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3144 int r1 = get_field(s->fields, r1);
3145 int r3 = get_field(s->fields, r3);
3146 TCGv_i32 t1, t3;
3148 /* r1 and r3 must be even. */
3149 if (r1 & 1 || r3 & 1) {
3150 gen_program_exception(s, PGM_SPECIFICATION);
3151 return DISAS_NORETURN;
3154 t1 = tcg_const_i32(r1);
3155 t3 = tcg_const_i32(r3);
3156 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3157 tcg_temp_free_i32(t1);
3158 tcg_temp_free_i32(t3);
3159 set_cc_static(s);
3160 return DISAS_NEXT;
3163 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3165 int r3 = get_field(s->fields, r3);
3166 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3167 set_cc_static(s);
3168 return DISAS_NEXT;
3171 #ifndef CONFIG_USER_ONLY
3172 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3174 int r1 = get_field(s->fields, l1);
3175 check_privileged(s);
3176 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3177 set_cc_static(s);
3178 return DISAS_NEXT;
3181 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3183 int r1 = get_field(s->fields, l1);
3184 check_privileged(s);
3185 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3186 set_cc_static(s);
3187 return DISAS_NEXT;
3189 #endif
3191 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3193 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3194 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3195 tcg_temp_free_i32(l);
3196 return DISAS_NEXT;
3199 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3201 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3202 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3203 tcg_temp_free_i32(l);
3204 return DISAS_NEXT;
3207 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3209 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3210 set_cc_static(s);
3211 return DISAS_NEXT;
3214 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3216 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3217 set_cc_static(s);
3218 return_low128(o->in2);
3219 return DISAS_NEXT;
3222 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3224 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3225 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3226 tcg_temp_free_i32(l);
3227 return DISAS_NEXT;
3230 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3232 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3233 return DISAS_NEXT;
3236 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3238 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3239 return DISAS_NEXT;
3242 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3244 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3245 return DISAS_NEXT;
3248 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3250 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3251 return DISAS_NEXT;
3254 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3256 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3257 return DISAS_NEXT;
3260 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3262 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3263 return_low128(o->out2);
3264 return DISAS_NEXT;
3267 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3269 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3270 return_low128(o->out2);
3271 return DISAS_NEXT;
3274 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3276 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3277 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3278 tcg_temp_free_i64(r3);
3279 return DISAS_NEXT;
3282 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3284 int r3 = get_field(s->fields, r3);
3285 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3286 return DISAS_NEXT;
3289 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3291 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3292 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3293 tcg_temp_free_i64(r3);
3294 return DISAS_NEXT;
3297 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3299 int r3 = get_field(s->fields, r3);
3300 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3301 return DISAS_NEXT;
3304 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3306 TCGv_i64 z, n;
3307 z = tcg_const_i64(0);
3308 n = tcg_temp_new_i64();
3309 tcg_gen_neg_i64(n, o->in2);
3310 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3311 tcg_temp_free_i64(n);
3312 tcg_temp_free_i64(z);
3313 return DISAS_NEXT;
3316 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3318 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3319 return DISAS_NEXT;
3322 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3324 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3325 return DISAS_NEXT;
3328 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3330 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3331 tcg_gen_mov_i64(o->out2, o->in2);
3332 return DISAS_NEXT;
3335 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3337 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3338 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3339 tcg_temp_free_i32(l);
3340 set_cc_static(s);
3341 return DISAS_NEXT;
3344 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3346 tcg_gen_neg_i64(o->out, o->in2);
3347 return DISAS_NEXT;
3350 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3352 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3353 return DISAS_NEXT;
3356 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3358 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3359 return DISAS_NEXT;
3362 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3364 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3365 tcg_gen_mov_i64(o->out2, o->in2);
3366 return DISAS_NEXT;
3369 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3371 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3372 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3373 tcg_temp_free_i32(l);
3374 set_cc_static(s);
3375 return DISAS_NEXT;
3378 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3380 tcg_gen_or_i64(o->out, o->in1, o->in2);
3381 return DISAS_NEXT;
3384 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3386 int shift = s->insn->data & 0xff;
3387 int size = s->insn->data >> 8;
3388 uint64_t mask = ((1ull << size) - 1) << shift;
3390 assert(!o->g_in2);
3391 tcg_gen_shli_i64(o->in2, o->in2, shift);
3392 tcg_gen_or_i64(o->out, o->in1, o->in2);
3394 /* Produce the CC from only the bits manipulated. */
3395 tcg_gen_andi_i64(cc_dst, o->out, mask);
3396 set_cc_nz_u64(s, cc_dst);
3397 return DISAS_NEXT;
3400 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3402 o->in1 = tcg_temp_new_i64();
3404 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3405 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3406 } else {
3407 /* Perform the atomic operation in memory. */
3408 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3409 s->insn->data);
3412 /* Recompute also for atomic case: needed for setting CC. */
3413 tcg_gen_or_i64(o->out, o->in1, o->in2);
3415 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3416 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3418 return DISAS_NEXT;
3421 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3423 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3424 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3425 tcg_temp_free_i32(l);
3426 return DISAS_NEXT;
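/* PACK ASCII: pack the L2+1 byte ASCII source at the second-operand
   address into a 16-byte decimal result at the first-operand address. */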
3429 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3431 int l2 = get_field(s->fields, l2) + 1;
3432 TCGv_i32 l;
3434 /* The length must not exceed 32 bytes. */
3435 if (l2 > 32) {
3436 gen_program_exception(s, PGM_SPECIFICATION);
3437 return DISAS_NORETURN;
3439 l = tcg_const_i32(l2);
3440 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3441 tcg_temp_free_i32(l);
3442 return DISAS_NEXT;
3445 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3447 int l2 = get_field(s->fields, l2) + 1;
3448 TCGv_i32 l;
3450 /* The length must be even and must not exceed 64 bytes. */
3451 if ((l2 & 1) || (l2 > 64)) {
3452 gen_program_exception(s, PGM_SPECIFICATION);
3453 return DISAS_NORETURN;
3455 l = tcg_const_i32(l2);
3456 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3457 tcg_temp_free_i32(l);
3458 return DISAS_NEXT;
3461 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3463 gen_helper_popcnt(o->out, o->in2);
3464 return DISAS_NEXT;
3467 #ifndef CONFIG_USER_ONLY
3468 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3470 check_privileged(s);
3471 gen_helper_ptlb(cpu_env);
3472 return DISAS_NEXT;
3474 #endif
3476 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3478 int i3 = get_field(s->fields, i3);
3479 int i4 = get_field(s->fields, i4);
3480 int i5 = get_field(s->fields, i5);
3481 int do_zero = i4 & 0x80;
3482 uint64_t mask, imask, pmask;
3483 int pos, len, rot;
3485 /* Adjust the arguments for the specific insn. */
3486 switch (s->fields->op2) {
3487 case 0x55: /* risbg */
3488 case 0x59: /* risbgn */
3489 i3 &= 63;
3490 i4 &= 63;
3491 pmask = ~0;
3492 break;
3493 case 0x5d: /* risbhg */
3494 i3 &= 31;
3495 i4 &= 31;
3496 pmask = 0xffffffff00000000ull;
3497 break;
3498 case 0x51: /* risblg */
3499 i3 &= 31;
3500 i4 &= 31;
3501 pmask = 0x00000000ffffffffull;
3502 break;
3503 default:
3504 g_assert_not_reached();
3507 /* MASK is the set of bits to be inserted from R2.
3508 Take care for I3/I4 wraparound. */
3509 mask = pmask >> i3;
3510 if (i3 <= i4) {
3511 mask ^= pmask >> i4 >> 1;
3512 } else {
3513 mask |= ~(pmask >> i4 >> 1);
3515 mask &= pmask;
3517 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3518 insns, we need to keep the other half of the register. */
3519 imask = ~mask | ~pmask;
3520 if (do_zero) {
3521 imask = ~pmask;
3524 len = i4 - i3 + 1;
3525 pos = 63 - i4;
3526 rot = i5 & 63;
3527 if (s->fields->op2 == 0x5d) {
3528 pos += 32;
3531 /* In some cases we can implement this with extract. */
3532 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3533 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3534 return DISAS_NEXT;
3537 /* In some cases we can implement this with deposit. */
3538 if (len > 0 && (imask == 0 || ~mask == imask)) {
3539 /* Note that we rotate the bits to be inserted to the lsb, not to
3540 the position as described in the PoO. */
3541 rot = (rot - pos) & 63;
3542 } else {
3543 pos = -1;
3546 /* Rotate the input as necessary. */
3547 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3549 /* Insert the selected bits into the output. */
3550 if (pos >= 0) {
3551 if (imask == 0) {
3552 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3553 } else {
3554 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3556 } else if (imask == 0) {
3557 tcg_gen_andi_i64(o->out, o->in2, mask);
3558 } else {
3559 tcg_gen_andi_i64(o->in2, o->in2, mask);
3560 tcg_gen_andi_i64(o->out, o->out, imask);
3561 tcg_gen_or_i64(o->out, o->out, o->in2);
3563 return DISAS_NEXT;
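/* ROTATE THEN {AND,OR,XOR} SELECTED BITS: rotate R2 left by I5, then
   combine bits I3..I4 of the result into R1 with the operation chosen
   by the opcode; I3 bit 0x80 requests the test-only form. */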
3566 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3568 int i3 = get_field(s->fields, i3);
3569 int i4 = get_field(s->fields, i4);
3570 int i5 = get_field(s->fields, i5);
3571 uint64_t mask;
3573 /* If this is a test-only form, arrange to discard the result. */
3574 if (i3 & 0x80) {
3575 o->out = tcg_temp_new_i64();
3576 o->g_out = false;
3579 i3 &= 63;
3580 i4 &= 63;
3581 i5 &= 63;
3583 /* MASK is the set of bits to be operated on from R2.
3584 Take care for I3/I4 wraparound. */
3585 mask = ~0ull >> i3;
3586 if (i3 <= i4) {
3587 mask ^= ~0ull >> i4 >> 1;
3588 } else {
3589 mask |= ~(~0ull >> i4 >> 1);
3592 /* Rotate the input as necessary. */
3593 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3595 /* Operate. */
3596 switch (s->fields->op2) {
3597 case 0x55: /* AND */
3598 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3599 tcg_gen_and_i64(o->out, o->out, o->in2);
3600 break;
3601 case 0x56: /* OR */
3602 tcg_gen_andi_i64(o->in2, o->in2, mask);
3603 tcg_gen_or_i64(o->out, o->out, o->in2);
3604 break;
3605 case 0x57: /* XOR */
3606 tcg_gen_andi_i64(o->in2, o->in2, mask);
3607 tcg_gen_xor_i64(o->out, o->out, o->in2);
3608 break;
3609 default:
3610 g_assert_not_reached();
3613 /* Set the CC. */
3614 tcg_gen_andi_i64(cc_dst, o->out, mask);
3615 set_cc_nz_u64(s, cc_dst);
3616 return DISAS_NEXT;
3619 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3621 tcg_gen_bswap16_i64(o->out, o->in2);
3622 return DISAS_NEXT;
3625 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3627 tcg_gen_bswap32_i64(o->out, o->in2);
3628 return DISAS_NEXT;
3631 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3633 tcg_gen_bswap64_i64(o->out, o->in2);
3634 return DISAS_NEXT;
3637 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3639 TCGv_i32 t1 = tcg_temp_new_i32();
3640 TCGv_i32 t2 = tcg_temp_new_i32();
3641 TCGv_i32 to = tcg_temp_new_i32();
3642 tcg_gen_extrl_i64_i32(t1, o->in1);
3643 tcg_gen_extrl_i64_i32(t2, o->in2);
3644 tcg_gen_rotl_i32(to, t1, t2);
3645 tcg_gen_extu_i32_i64(o->out, to);
3646 tcg_temp_free_i32(t1);
3647 tcg_temp_free_i32(t2);
3648 tcg_temp_free_i32(to);
3649 return DISAS_NEXT;
3652 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3654 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3655 return DISAS_NEXT;
3658 #ifndef CONFIG_USER_ONLY
3659 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3661 check_privileged(s);
3662 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3663 set_cc_static(s);
3664 return DISAS_NEXT;
3667 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3669 check_privileged(s);
3670 gen_helper_sacf(cpu_env, o->in2);
3671 /* Addressing mode has changed, so end the block. */
3672 return DISAS_PC_STALE;
3674 #endif
3676 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3678 int sam = s->insn->data;
3679 TCGv_i64 tsam;
3680 uint64_t mask;
3682 switch (sam) {
3683 case 0:
3684 mask = 0xffffff;
3685 break;
3686 case 1:
3687 mask = 0x7fffffff;
3688 break;
3689 default:
3690 mask = -1;
3691 break;
3694 /* Bizarre but true: we check the address of the current insn for the
3695 specification exception, not that of the next insn to execute. Thus
3696 the PoO documents that Bad Things Happen two bytes before the end. */
3697 if (s->pc & ~mask) {
3698 gen_program_exception(s, PGM_SPECIFICATION);
3699 return DISAS_NORETURN;
3701 s->next_pc &= mask;
3703 tsam = tcg_const_i64(sam);
3704 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3705 tcg_temp_free_i64(tsam);
3707 /* Always exit the TB, since we (may have) changed execution mode. */
3708 return DISAS_PC_STALE;
3711 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3713 int r1 = get_field(s->fields, r1);
3714 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3715 return DISAS_NEXT;
3718 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3720 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3721 return DISAS_NEXT;
3724 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3726 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3727 return DISAS_NEXT;
3730 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3732 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3733 return_low128(o->out2);
3734 return DISAS_NEXT;
3737 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3739 gen_helper_sqeb(o->out, cpu_env, o->in2);
3740 return DISAS_NEXT;
3743 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3745 gen_helper_sqdb(o->out, cpu_env, o->in2);
3746 return DISAS_NEXT;
3749 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3751 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3752 return_low128(o->out2);
3753 return DISAS_NEXT;
3756 #ifndef CONFIG_USER_ONLY
3757 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3759 check_privileged(s);
3760 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3761 set_cc_static(s);
3762 return DISAS_NEXT;
3765 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3767 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3768 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3769 check_privileged(s);
3770 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3771 set_cc_static(s);
3772 tcg_temp_free_i32(r1);
3773 tcg_temp_free_i32(r3);
3774 return DISAS_NEXT;
3776 #endif
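/* STORE ON CONDITION (STOC, STOCG, STOCFH per insn->data): evaluate
   the M3 condition against the current CC and branch around the store
   when it does not hold. */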
3778 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3780 DisasCompare c;
3781 TCGv_i64 a, h;
3782 TCGLabel *lab;
3783 int r1;
3785 disas_jcc(s, &c, get_field(s->fields, m3));
3787 /* We want to store when the condition is fulfilled, so branch
3788 out when it is not. */
3789 c.cond = tcg_invert_cond(c.cond);
3791 lab = gen_new_label();
3792 if (c.is_64) {
3793 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3794 } else {
3795 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3797 free_compare(&c);
3799 r1 = get_field(s->fields, r1);
3800 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3801 switch (s->insn->data) {
3802 case 1: /* STOCG */
3803 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3804 break;
3805 case 0: /* STOC */
3806 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3807 break;
3808 case 2: /* STOCFH */
3809 h = tcg_temp_new_i64();
3810 tcg_gen_shri_i64(h, regs[r1], 32);
3811 tcg_gen_qemu_st32(h, a, get_mem_index(s));
3812 tcg_temp_free_i64(h);
3813 break;
3814 default:
3815 g_assert_not_reached();
3817 tcg_temp_free_i64(a);
3819 gen_set_label(lab);
3820 return DISAS_NEXT;
3823 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3825 uint64_t sign = 1ull << s->insn->data;
3826 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3827 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3828 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3829 /* The arithmetic left shift is curious in that it does not affect
3830 the sign bit. Copy that over from the source unchanged. */
3831 tcg_gen_andi_i64(o->out, o->out, ~sign);
3832 tcg_gen_andi_i64(o->in1, o->in1, sign);
3833 tcg_gen_or_i64(o->out, o->out, o->in1);
3834 return DISAS_NEXT;
3837 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3839 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3840 return DISAS_NEXT;
3843 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3845 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3846 return DISAS_NEXT;
3849 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
3851 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3852 return DISAS_NEXT;
3855 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
3857 gen_helper_sfpc(cpu_env, o->in2);
3858 return DISAS_NEXT;
3861 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
3863 gen_helper_sfas(cpu_env, o->in2);
3864 return DISAS_NEXT;
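/* SRNM and SRNMB set the BFP rounding mode (2 and 3 bits wide,
   respectively), SRNMT the DFP rounding mode; the new mode comes from
   the low bits of the second-operand address and is deposited into
   the appropriate field of the FPC. */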
3867 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
3869 int b2 = get_field(s->fields, b2);
3870 int d2 = get_field(s->fields, d2);
3871 TCGv_i64 t1 = tcg_temp_new_i64();
3872 TCGv_i64 t2 = tcg_temp_new_i64();
3873 int mask, pos, len;
3875 switch (s->fields->op2) {
3876 case 0x99: /* SRNM */
3877 pos = 0, len = 2;
3878 break;
3879 case 0xb8: /* SRNMB */
3880 pos = 0, len = 3;
3881 break;
3882 case 0xb9: /* SRNMT */
3883 pos = 4, len = 3;
3884 break;
3885 default:
3886 g_assert_not_reached();
3888 mask = (1 << len) - 1;
3890 /* Insert the value into the appropriate field of the FPC. */
3891 if (b2 == 0) {
3892 tcg_gen_movi_i64(t1, d2 & mask);
3893 } else {
3894 tcg_gen_addi_i64(t1, regs[b2], d2);
3895 tcg_gen_andi_i64(t1, t1, mask);
3897 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3898 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3899 tcg_temp_free_i64(t1);
3901 /* Then install the new FPC to set the rounding mode in fpu_status. */
3902 gen_helper_sfpc(cpu_env, t2);
3903 tcg_temp_free_i64(t2);
3904 return DISAS_NEXT;
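/* SET PROGRAM MASK: bits 2-3 of the 32-bit operand become the
   condition code, bits 4-7 the program mask. */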
3907 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
3909 tcg_gen_extrl_i64_i32(cc_op, o->in1);
3910 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
3911 set_cc_static(s);
3913 tcg_gen_shri_i64(o->in1, o->in1, 24);
3914 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
3915 return DISAS_NEXT;
3918 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
3920 int b1 = get_field(s->fields, b1);
3921 int d1 = get_field(s->fields, d1);
3922 int b2 = get_field(s->fields, b2);
3923 int d2 = get_field(s->fields, d2);
3924 int r3 = get_field(s->fields, r3);
3925 TCGv_i64 tmp = tcg_temp_new_i64();
3927 /* fetch all operands first */
3928 o->in1 = tcg_temp_new_i64();
3929 tcg_gen_addi_i64(o->in1, regs[b1], d1);
3930 o->in2 = tcg_temp_new_i64();
3931 tcg_gen_addi_i64(o->in2, regs[b2], d2);
3932 o->addr1 = get_address(s, 0, r3, 0);
3934 /* load the third operand into r3 before modifying anything */
3935 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
3937 /* subtract CPU timer from first operand and store in GR0 */
3938 gen_helper_stpt(tmp, cpu_env);
3939 tcg_gen_sub_i64(regs[0], o->in1, tmp);
3941 /* store second operand in GR1 */
3942 tcg_gen_mov_i64(regs[1], o->in2);
3944 tcg_temp_free_i64(tmp);
3945 return DISAS_NEXT;
3948 #ifndef CONFIG_USER_ONLY
3949 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
3951 check_privileged(s);
3952 tcg_gen_shri_i64(o->in2, o->in2, 4);
3953 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
3954 return DISAS_NEXT;
3957 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
3959 check_privileged(s);
3960 gen_helper_sske(cpu_env, o->in1, o->in2);
3961 return DISAS_NEXT;
3964 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
3966 check_privileged(s);
3967 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3968 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3969 return DISAS_PC_STALE_NOCHAIN;
3972 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
3974 check_privileged(s);
3975 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
3976 return DISAS_NEXT;
3979 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
3981 gen_helper_stck(o->out, cpu_env);
3982 /* ??? We don't implement clock states. */
3983 gen_op_movi_cc(s, 0);
3984 return DISAS_NEXT;
3987 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
3989 TCGv_i64 c1 = tcg_temp_new_i64();
3990 TCGv_i64 c2 = tcg_temp_new_i64();
3991 TCGv_i64 todpr = tcg_temp_new_i64();
3992 gen_helper_stck(c1, cpu_env);
3993 /* 16-bit value stored in a uint32_t (only the valid bits are set) */
3994 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
3995 /* Shift the 64-bit value into its place as a zero-extended
3996 104-bit value. Note that "bit positions 64-103 are always
3997 non-zero so that they compare differently to STCK"; we set
3998 the least significant bit to 1. */
3999 tcg_gen_shli_i64(c2, c1, 56);
4000 tcg_gen_shri_i64(c1, c1, 8);
4001 tcg_gen_ori_i64(c2, c2, 0x10000);
4002 tcg_gen_or_i64(c2, c2, todpr);
4003 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4004 tcg_gen_addi_i64(o->in2, o->in2, 8);
4005 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4006 tcg_temp_free_i64(c1);
4007 tcg_temp_free_i64(c2);
4008 tcg_temp_free_i64(todpr);
4009 /* ??? We don't implement clock states. */
4010 gen_op_movi_cc(s, 0);
4011 return DISAS_NEXT;
4014 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4016 check_privileged(s);
4017 gen_helper_sckc(cpu_env, o->in2);
4018 return DISAS_NEXT;
4021 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4023 check_privileged(s);
4024 gen_helper_sckpf(cpu_env, regs[0]);
4025 return DISAS_NEXT;
4028 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4030 check_privileged(s);
4031 gen_helper_stckc(o->out, cpu_env);
4032 return DISAS_NEXT;
4035 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4037 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4038 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4039 check_privileged(s);
4040 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4041 tcg_temp_free_i32(r1);
4042 tcg_temp_free_i32(r3);
4043 return DISAS_NEXT;
4046 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4048 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4049 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4050 check_privileged(s);
4051 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4052 tcg_temp_free_i32(r1);
4053 tcg_temp_free_i32(r3);
4054 return DISAS_NEXT;
4057 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4059 check_privileged(s);
4060 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4061 return DISAS_NEXT;
4064 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4066 check_privileged(s);
4067 gen_helper_spt(cpu_env, o->in2);
4068 return DISAS_NEXT;
4071 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4073 check_privileged(s);
4074 gen_helper_stfl(cpu_env);
4075 return DISAS_NEXT;
4078 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4080 check_privileged(s);
4081 gen_helper_stpt(o->out, cpu_env);
4082 return DISAS_NEXT;
4085 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4087 check_privileged(s);
4088 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4089 set_cc_static(s);
4090 return DISAS_NEXT;
4093 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4095 check_privileged(s);
4096 gen_helper_spx(cpu_env, o->in2);
4097 return DISAS_NEXT;
4100 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4102 check_privileged(s);
4103 gen_helper_xsch(cpu_env, regs[1]);
4104 set_cc_static(s);
4105 return DISAS_NEXT;
4108 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4110 check_privileged(s);
4111 gen_helper_csch(cpu_env, regs[1]);
4112 set_cc_static(s);
4113 return DISAS_NEXT;
4116 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4118 check_privileged(s);
4119 gen_helper_hsch(cpu_env, regs[1]);
4120 set_cc_static(s);
4121 return DISAS_NEXT;
4124 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4126 check_privileged(s);
4127 gen_helper_msch(cpu_env, regs[1], o->in2);
4128 set_cc_static(s);
4129 return DISAS_NEXT;
4132 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4134 check_privileged(s);
4135 gen_helper_rchp(cpu_env, regs[1]);
4136 set_cc_static(s);
4137 return DISAS_NEXT;
4140 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4142 check_privileged(s);
4143 gen_helper_rsch(cpu_env, regs[1]);
4144 set_cc_static(s);
4145 return DISAS_NEXT;
4148 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4150 check_privileged(s);
4151 gen_helper_sal(cpu_env, regs[1]);
4152 return DISAS_NEXT;
4155 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4157 check_privileged(s);
4158 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4159 return DISAS_NEXT;
4162 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4164 check_privileged(s);
4165 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4166 gen_op_movi_cc(s, 3);
4167 return DISAS_NEXT;
4170 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4172 check_privileged(s);
4173 /* The instruction is suppressed if not provided. */
4174 return DISAS_NEXT;
4177 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4179 check_privileged(s);
4180 gen_helper_ssch(cpu_env, regs[1], o->in2);
4181 set_cc_static(s);
4182 return DISAS_NEXT;
4185 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4187 check_privileged(s);
4188 gen_helper_stsch(cpu_env, regs[1], o->in2);
4189 set_cc_static(s);
4190 return DISAS_NEXT;
4193 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4195 check_privileged(s);
4196 gen_helper_stcrw(cpu_env, o->in2);
4197 set_cc_static(s);
4198 return DISAS_NEXT;
4201 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4203 check_privileged(s);
4204 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4205 set_cc_static(s);
4206 return DISAS_NEXT;
4209 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4211 check_privileged(s);
4212 gen_helper_tsch(cpu_env, regs[1], o->in2);
4213 set_cc_static(s);
4214 return DISAS_NEXT;
4217 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4219 check_privileged(s);
4220 gen_helper_chsc(cpu_env, o->in2);
4221 set_cc_static(s);
4222 return DISAS_NEXT;
4225 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4227 check_privileged(s);
4228 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4229 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4230 return DISAS_NEXT;
4233 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4235 uint64_t i2 = get_field(s->fields, i2);
4236 TCGv_i64 t;
4238 check_privileged(s);
4240 /* It is important to do what the instruction name says: STORE THEN.
4241 If we let the output hook perform the store and then fault and
4242 restart, we would have the wrong SYSTEM MASK in place. */
4243 t = tcg_temp_new_i64();
4244 tcg_gen_shri_i64(t, psw_mask, 56);
4245 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4246 tcg_temp_free_i64(t);
4248 if (s->fields->op == 0xac) {
4249 tcg_gen_andi_i64(psw_mask, psw_mask,
4250 (i2 << 56) | 0x00ffffffffffffffull);
4251 } else {
4252 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4255 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4256 return DISAS_PC_STALE_NOCHAIN;
4259 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4261 check_privileged(s);
4262 gen_helper_stura(cpu_env, o->in2, o->in1);
4263 return DISAS_NEXT;
4266 static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4268 check_privileged(s);
4269 gen_helper_sturg(cpu_env, o->in2, o->in1);
4270 return DISAS_NEXT;
4272 #endif
4274 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4276 gen_helper_stfle(cc_op, cpu_env, o->in2);
4277 set_cc_static(s);
4278 return DISAS_NEXT;
4281 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4283 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4284 return DISAS_NEXT;
4287 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4289 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4290 return DISAS_NEXT;
4293 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4295 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4296 return DISAS_NEXT;
4299 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4301 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4302 return DISAS_NEXT;
4305 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4307 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4308 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4309 gen_helper_stam(cpu_env, r1, o->in2, r3);
4310 tcg_temp_free_i32(r1);
4311 tcg_temp_free_i32(r3);
4312 return DISAS_NEXT;
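/* STORE CHARACTERS UNDER MASK: the mirror of op_icm; contiguous masks
   become a single wide store, anything else a shift-and-store per
   selected byte. */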
4315 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4317 int m3 = get_field(s->fields, m3);
4318 int pos, base = s->insn->data;
4319 TCGv_i64 tmp = tcg_temp_new_i64();
4321 pos = base + ctz32(m3) * 8;
4322 switch (m3) {
4323 case 0xf:
4324 /* Effectively a 32-bit store. */
4325 tcg_gen_shri_i64(tmp, o->in1, pos);
4326 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4327 break;
4329 case 0xc:
4330 case 0x6:
4331 case 0x3:
4332 /* Effectively a 16-bit store. */
4333 tcg_gen_shri_i64(tmp, o->in1, pos);
4334 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4335 break;
4337 case 0x8:
4338 case 0x4:
4339 case 0x2:
4340 case 0x1:
4341 /* Effectively an 8-bit store. */
4342 tcg_gen_shri_i64(tmp, o->in1, pos);
4343 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4344 break;
4346 default:
4347 /* This is going to be a sequence of shifts and stores. */
4348 pos = base + 32 - 8;
4349 while (m3) {
4350 if (m3 & 0x8) {
4351 tcg_gen_shri_i64(tmp, o->in1, pos);
4352 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4353 tcg_gen_addi_i64(o->in2, o->in2, 1);
4355 m3 = (m3 << 1) & 0xf;
4356 pos -= 8;
4358 break;
4360 tcg_temp_free_i64(tmp);
4361 return DISAS_NEXT;
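/* A worked example of the mask handling above: for STCM (where the base
   in insn->data is 0) with the non-contiguous mask m3 = 0101b, the
   default case starts at pos = 24 and walks the mask from its leftmost
   bit:

       m3 = 0101b, pos = 24: bit clear, skip
       m3 = 1010b, pos = 16: store bits 23:16, advance the address
       m3 = 0100b, pos =  8: bit clear, skip
       m3 = 1000b, pos =  0: store bits 7:0, advance the address

   i.e. mask bit i (counting from the left) selects byte i of the low 32
   bits, and the selected bytes land in consecutive memory bytes.  The
   contiguous masks are peeled off above as a single wider store. */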
4364 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4366 int r1 = get_field(s->fields, r1);
4367 int r3 = get_field(s->fields, r3);
4368 int size = s->insn->data;
4369 TCGv_i64 tsize = tcg_const_i64(size);
4371 while (1) {
4372 if (size == 8) {
4373 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4374 } else {
4375 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4377 if (r1 == r3) {
4378 break;
4380 tcg_gen_add_i64(o->in2, o->in2, tsize);
4381 r1 = (r1 + 1) & 15;
4384 tcg_temp_free_i64(tsize);
4385 return DISAS_NEXT;
4388 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4390 int r1 = get_field(s->fields, r1);
4391 int r3 = get_field(s->fields, r3);
4392 TCGv_i64 t = tcg_temp_new_i64();
4393 TCGv_i64 t4 = tcg_const_i64(4);
4394 TCGv_i64 t32 = tcg_const_i64(32);
4396 while (1) {
4397 tcg_gen_shl_i64(t, regs[r1], t32);
4398 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4399 if (r1 == r3) {
4400 break;
4402 tcg_gen_add_i64(o->in2, o->in2, t4);
4403 r1 = (r1 + 1) & 15;
4406 tcg_temp_free_i64(t);
4407 tcg_temp_free_i64(t4);
4408 tcg_temp_free_i64(t32);
4409 return DISAS_NEXT;
4412 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4414 if (tb_cflags(s->tb) & CF_PARALLEL) {
4415 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4416 } else {
4417 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4419 return DISAS_NEXT;
4422 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4424 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4425 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4427 gen_helper_srst(cpu_env, r1, r2);
4429 tcg_temp_free_i32(r1);
4430 tcg_temp_free_i32(r2);
4431 set_cc_static(s);
4432 return DISAS_NEXT;
4435 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4437 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4438 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4440 gen_helper_srstu(cpu_env, r1, r2);
4442 tcg_temp_free_i32(r1);
4443 tcg_temp_free_i32(r2);
4444 set_cc_static(s);
4445 return DISAS_NEXT;
4448 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4450 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4451 return DISAS_NEXT;
4454 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4456 DisasCompare cmp;
4457 TCGv_i64 borrow;
4459 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4461 /* The !borrow flag is the msb of CC. Since we want the inverse of
4462 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4 (the mask bit for condition code N is 1 << (3 - N)). */
4463 disas_jcc(s, &cmp, 8 | 4);
4464 borrow = tcg_temp_new_i64();
4465 if (cmp.is_64) {
4466 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4467 } else {
4468 TCGv_i32 t = tcg_temp_new_i32();
4469 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4470 tcg_gen_extu_i32_i64(borrow, t);
4471 tcg_temp_free_i32(t);
4473 free_compare(&cmp);
4475 tcg_gen_sub_i64(o->out, o->out, borrow);
4476 tcg_temp_free_i64(borrow);
4477 return DISAS_NEXT;
4480 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4482 TCGv_i32 t;
4484 update_psw_addr(s);
4485 update_cc_op(s);
4487 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4488 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4489 tcg_temp_free_i32(t);
4491 t = tcg_const_i32(s->ilen);
4492 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4493 tcg_temp_free_i32(t);
4495 gen_exception(EXCP_SVC);
4496 return DISAS_NORETURN;
4499 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4501 int cc = 0;
4503 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4504 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4505 gen_op_movi_cc(s, cc);
4506 return DISAS_NEXT;
4509 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4511 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4512 set_cc_static(s);
4513 return DISAS_NEXT;
4516 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4518 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4519 set_cc_static(s);
4520 return DISAS_NEXT;
4523 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4525 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4526 set_cc_static(s);
4527 return DISAS_NEXT;
4530 #ifndef CONFIG_USER_ONLY
4532 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4534 check_privileged(s);
4535 gen_helper_testblock(cc_op, cpu_env, o->in2);
4536 set_cc_static(s);
4537 return DISAS_NEXT;
4540 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4542 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4543 set_cc_static(s);
4544 return DISAS_NEXT;
4547 #endif
4549 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4551 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4552 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4553 tcg_temp_free_i32(l1);
4554 set_cc_static(s);
4555 return DISAS_NEXT;
4558 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4560 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4561 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4562 tcg_temp_free_i32(l);
4563 set_cc_static(s);
4564 return DISAS_NEXT;
4567 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4569 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4570 return_low128(o->out2);
4571 set_cc_static(s);
4572 return DISAS_NEXT;
4575 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4577 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4578 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4579 tcg_temp_free_i32(l);
4580 set_cc_static(s);
4581 return DISAS_NEXT;
4584 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4586 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4587 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4588 tcg_temp_free_i32(l);
4589 set_cc_static(s);
4590 return DISAS_NEXT;
4593 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4595 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4596 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4597 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4598 TCGv_i32 tst = tcg_temp_new_i32();
4599 int m3 = get_field(s->fields, m3);
4601 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4602 m3 = 0;
4604 if (m3 & 1) {
4605 tcg_gen_movi_i32(tst, -1);
4606 } else {
4607 tcg_gen_extrl_i64_i32(tst, regs[0]);
4608 if (s->insn->opc & 3) {
4609 tcg_gen_ext8u_i32(tst, tst);
4610 } else {
4611 tcg_gen_ext16u_i32(tst, tst);
4614 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4616 tcg_temp_free_i32(r1);
4617 tcg_temp_free_i32(r2);
4618 tcg_temp_free_i32(sizes);
4619 tcg_temp_free_i32(tst);
4620 set_cc_static(s);
4621 return DISAS_NEXT;
4624 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4626 TCGv_i32 t1 = tcg_const_i32(0xff);
4627 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4628 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4629 tcg_temp_free_i32(t1);
4630 set_cc_static(s);
4631 return DISAS_NEXT;
4634 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4636 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4637 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4638 tcg_temp_free_i32(l);
4639 return DISAS_NEXT;
4642 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4644 int l1 = get_field(s->fields, l1) + 1;
4645 TCGv_i32 l;
4647 /* The length must not exceed 32 bytes. */
4648 if (l1 > 32) {
4649 gen_program_exception(s, PGM_SPECIFICATION);
4650 return DISAS_NORETURN;
4652 l = tcg_const_i32(l1);
4653 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4654 tcg_temp_free_i32(l);
4655 set_cc_static(s);
4656 return DISAS_NEXT;
4659 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4661 int l1 = get_field(s->fields, l1) + 1;
4662 TCGv_i32 l;
4664 /* The length must be even and must not exceed 64 bytes. */
4665 if ((l1 & 1) || (l1 > 64)) {
4666 gen_program_exception(s, PGM_SPECIFICATION);
4667 return DISAS_NORETURN;
4669 l = tcg_const_i32(l1);
4670 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4671 tcg_temp_free_i32(l);
4672 set_cc_static(s);
4673 return DISAS_NEXT;
4677 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4679 int d1 = get_field(s->fields, d1);
4680 int d2 = get_field(s->fields, d2);
4681 int b1 = get_field(s->fields, b1);
4682 int b2 = get_field(s->fields, b2);
4683 int l = get_field(s->fields, l1);
4684 TCGv_i32 t32;
4686 o->addr1 = get_address(s, 0, b1, d1);
4688 /* If the addresses are identical, this is a store/memset of zero. */
4689 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4690 o->in2 = tcg_const_i64(0);
4692 l++;
4693 while (l >= 8) {
4694 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4695 l -= 8;
4696 if (l > 0) {
4697 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4700 if (l >= 4) {
4701 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4702 l -= 4;
4703 if (l > 0) {
4704 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4707 if (l >= 2) {
4708 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4709 l -= 2;
4710 if (l > 0) {
4711 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4714 if (l) {
4715 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4717 gen_op_movi_cc(s, 0);
4718 return DISAS_NEXT;
4721 /* But in general we'll defer to a helper. */
4722 o->in2 = get_address(s, 0, b2, d2);
4723 t32 = tcg_const_i32(l);
4724 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4725 tcg_temp_free_i32(t32);
4726 set_cc_static(s);
4727 return DISAS_NEXT;
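/* For example, XC of an 11-byte field (l1 = 10) with itself takes the
   fast path above: l becomes 11, so we emit one 8-byte, one 2-byte and
   one 1-byte store of zero, bumping the address in between, and set
   CC = 0 without calling the helper. */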
4730 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4732 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4733 return DISAS_NEXT;
4736 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4738 int shift = s->insn->data & 0xff;
4739 int size = s->insn->data >> 8;
4740 uint64_t mask = ((1ull << size) - 1) << shift;
4742 assert(!o->g_in2);
4743 tcg_gen_shli_i64(o->in2, o->in2, shift);
4744 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4746 /* Produce the CC from only the bits manipulated. */
4747 tcg_gen_andi_i64(cc_dst, o->out, mask);
4748 set_cc_nz_u64(s, cc_dst);
4749 return DISAS_NEXT;
4752 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4754 o->in1 = tcg_temp_new_i64();
4756 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4757 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4758 } else {
4759 /* Perform the atomic operation in memory. */
4760 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4761 s->insn->data);
4764 /* Recompute also for atomic case: needed for setting CC. */
4765 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4767 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4768 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4770 return DISAS_NEXT;
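/* XI is a read-xor-write of the storage operand.  With the
   interlocked-access facility 2 installed the update must be
   interlocked, hence the atomic fetch-xor; without it a plain
   load/store pair is sufficient.  Either way the xor is (re)computed
   into o->out so that the CC generator sees the result. */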
4773 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4775 o->out = tcg_const_i64(0);
4776 return DISAS_NEXT;
4779 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4781 o->out = tcg_const_i64(0);
4782 o->out2 = o->out;
4783 o->g_out2 = true;
4784 return DISAS_NEXT;
4787 #ifndef CONFIG_USER_ONLY
4788 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4790 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4792 check_privileged(s);
4793 gen_helper_clp(cpu_env, r2);
4794 tcg_temp_free_i32(r2);
4795 set_cc_static(s);
4796 return DISAS_NEXT;
4799 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4801 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4802 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4804 check_privileged(s);
4805 gen_helper_pcilg(cpu_env, r1, r2);
4806 tcg_temp_free_i32(r1);
4807 tcg_temp_free_i32(r2);
4808 set_cc_static(s);
4809 return DISAS_NEXT;
4812 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4814 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4815 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4817 check_privileged(s);
4818 gen_helper_pcistg(cpu_env, r1, r2);
4819 tcg_temp_free_i32(r1);
4820 tcg_temp_free_i32(r2);
4821 set_cc_static(s);
4822 return DISAS_NEXT;
4825 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4827 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4828 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4830 check_privileged(s);
4831 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4832 tcg_temp_free_i32(ar);
4833 tcg_temp_free_i32(r1);
4834 set_cc_static(s);
4835 return DISAS_NEXT;
4838 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4840 check_privileged(s);
4841 gen_helper_sic(cpu_env, o->in1, o->in2);
4842 return DISAS_NEXT;
4845 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4847 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4848 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4850 check_privileged(s);
4851 gen_helper_rpcit(cpu_env, r1, r2);
4852 tcg_temp_free_i32(r1);
4853 tcg_temp_free_i32(r2);
4854 set_cc_static(s);
4855 return DISAS_NEXT;
4858 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4860 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4861 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4862 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4864 check_privileged(s);
4865 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4866 tcg_temp_free_i32(ar);
4867 tcg_temp_free_i32(r1);
4868 tcg_temp_free_i32(r3);
4869 set_cc_static(s);
4870 return DISAS_NEXT;
4873 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4875 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4876 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4878 check_privileged(s);
4879 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4880 tcg_temp_free_i32(ar);
4881 tcg_temp_free_i32(r1);
4882 set_cc_static(s);
4883 return DISAS_NEXT;
4885 #endif
4887 /* ====================================================================== */
4888 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4889 the original inputs), update the various cc data structures in order to
4890 be able to compute the new condition code. */
4892 static void cout_abs32(DisasContext *s, DisasOps *o)
4894 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4897 static void cout_abs64(DisasContext *s, DisasOps *o)
4899 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4902 static void cout_adds32(DisasContext *s, DisasOps *o)
4904 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4907 static void cout_adds64(DisasContext *s, DisasOps *o)
4909 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4912 static void cout_addu32(DisasContext *s, DisasOps *o)
4914 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4917 static void cout_addu64(DisasContext *s, DisasOps *o)
4919 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4922 static void cout_addc32(DisasContext *s, DisasOps *o)
4924 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4927 static void cout_addc64(DisasContext *s, DisasOps *o)
4929 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4932 static void cout_cmps32(DisasContext *s, DisasOps *o)
4934 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4937 static void cout_cmps64(DisasContext *s, DisasOps *o)
4939 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4942 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4944 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4947 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4949 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4952 static void cout_f32(DisasContext *s, DisasOps *o)
4954 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4957 static void cout_f64(DisasContext *s, DisasOps *o)
4959 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4962 static void cout_f128(DisasContext *s, DisasOps *o)
4964 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4967 static void cout_nabs32(DisasContext *s, DisasOps *o)
4969 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4972 static void cout_nabs64(DisasContext *s, DisasOps *o)
4974 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4977 static void cout_neg32(DisasContext *s, DisasOps *o)
4979 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4982 static void cout_neg64(DisasContext *s, DisasOps *o)
4984 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4987 static void cout_nz32(DisasContext *s, DisasOps *o)
4989 tcg_gen_ext32u_i64(cc_dst, o->out);
4990 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4993 static void cout_nz64(DisasContext *s, DisasOps *o)
4995 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4998 static void cout_s32(DisasContext *s, DisasOps *o)
5000 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5003 static void cout_s64(DisasContext *s, DisasOps *o)
5005 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5008 static void cout_subs32(DisasContext *s, DisasOps *o)
5010 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5013 static void cout_subs64(DisasContext *s, DisasOps *o)
5015 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5018 static void cout_subu32(DisasContext *s, DisasOps *o)
5020 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5023 static void cout_subu64(DisasContext *s, DisasOps *o)
5025 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5028 static void cout_subb32(DisasContext *s, DisasOps *o)
5030 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5033 static void cout_subb64(DisasContext *s, DisasOps *o)
5035 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5038 static void cout_tm32(DisasContext *s, DisasOps *o)
5040 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5043 static void cout_tm64(DisasContext *s, DisasOps *o)
5045 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5048 /* ====================================================================== */
5049 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5050 with the TCG register to which we will write. Used in combination with
5051 the "wout" generators, in some cases we need a new temporary, and in
5052 some cases we can write to a TCG global. */
5054 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5056 o->out = tcg_temp_new_i64();
5058 #define SPEC_prep_new 0
5060 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5062 o->out = tcg_temp_new_i64();
5063 o->out2 = tcg_temp_new_i64();
5065 #define SPEC_prep_new_P 0
5067 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5069 o->out = regs[get_field(f, r1)];
5070 o->g_out = true;
5072 #define SPEC_prep_r1 0
5074 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5076 int r1 = get_field(f, r1);
5077 o->out = regs[r1];
5078 o->out2 = regs[r1 + 1];
5079 o->g_out = o->g_out2 = true;
5081 #define SPEC_prep_r1_P SPEC_r1_even
5083 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5085 o->out = fregs[get_field(f, r1)];
5086 o->g_out = true;
5088 #define SPEC_prep_f1 0
5090 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5092 int r1 = get_field(f, r1);
5093 o->out = fregs[r1];
5094 o->out2 = fregs[r1 + 2];
5095 o->g_out = o->g_out2 = true;
5097 #define SPEC_prep_x1 SPEC_r1_f128
5099 /* ====================================================================== */
5100 /* The "Write OUTput" generators. These generally perform some non-trivial
5101 copy of data to TCG globals, or to main memory. The trivial cases are
5102 generally handled by having a "prep" generator install the TCG global
5103 as the destination of the operation. */
5105 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5107 store_reg(get_field(f, r1), o->out);
5109 #define SPEC_wout_r1 0
5111 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5113 int r1 = get_field(f, r1);
5114 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5116 #define SPEC_wout_r1_8 0
5118 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5120 int r1 = get_field(f, r1);
5121 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5123 #define SPEC_wout_r1_16 0
5125 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5127 store_reg32_i64(get_field(f, r1), o->out);
5129 #define SPEC_wout_r1_32 0
5131 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5133 store_reg32h_i64(get_field(f, r1), o->out);
5135 #define SPEC_wout_r1_32h 0
5137 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5139 int r1 = get_field(f, r1);
5140 store_reg32_i64(r1, o->out);
5141 store_reg32_i64(r1 + 1, o->out2);
5143 #define SPEC_wout_r1_P32 SPEC_r1_even
5145 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5147 int r1 = get_field(f, r1);
5148 store_reg32_i64(r1 + 1, o->out);
5149 tcg_gen_shri_i64(o->out, o->out, 32);
5150 store_reg32_i64(r1, o->out);
5152 #define SPEC_wout_r1_D32 SPEC_r1_even
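/* Note the pair convention shared by the D32 helpers: the even register
   of the r1 pair holds the high 32 bits and the odd register the low 32
   bits, matching the in1_r1_D32/in2_r1_D32 generators below, which
   assemble the pair with tcg_gen_concat32_i64(in, regs[r1 + 1],
   regs[r1]). */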
5154 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5156 int r3 = get_field(f, r3);
5157 store_reg32_i64(r3, o->out);
5158 store_reg32_i64(r3 + 1, o->out2);
5160 #define SPEC_wout_r3_P32 SPEC_r3_even
5162 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5164 int r3 = get_field(f, r3);
5165 store_reg(r3, o->out);
5166 store_reg(r3 + 1, o->out2);
5168 #define SPEC_wout_r3_P64 SPEC_r3_even
5170 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5172 store_freg32_i64(get_field(f, r1), o->out);
5174 #define SPEC_wout_e1 0
5176 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5178 store_freg(get_field(f, r1), o->out);
5180 #define SPEC_wout_f1 0
5182 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5184 int f1 = get_field(s->fields, r1);
5185 store_freg(f1, o->out);
5186 store_freg(f1 + 2, o->out2);
5188 #define SPEC_wout_x1 SPEC_r1_f128
5190 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5192 if (get_field(f, r1) != get_field(f, r2)) {
5193 store_reg32_i64(get_field(f, r1), o->out);
5196 #define SPEC_wout_cond_r1r2_32 0
5198 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5200 if (get_field(f, r1) != get_field(f, r2)) {
5201 store_freg32_i64(get_field(f, r1), o->out);
5204 #define SPEC_wout_cond_e1e2 0
5206 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5208 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5210 #define SPEC_wout_m1_8 0
5212 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5214 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5216 #define SPEC_wout_m1_16 0
5218 #ifndef CONFIG_USER_ONLY
5219 static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
5221 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5223 #define SPEC_wout_m1_16a 0
5224 #endif
5226 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5228 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5230 #define SPEC_wout_m1_32 0
5232 #ifndef CONFIG_USER_ONLY
5233 static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
5235 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5237 #define SPEC_wout_m1_32a 0
5238 #endif
5240 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5242 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5244 #define SPEC_wout_m1_64 0
5246 #ifndef CONFIG_USER_ONLY
5247 static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5249 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5251 #define SPEC_wout_m1_64a 0
5252 #endif
5254 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5256 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5258 #define SPEC_wout_m2_32 0
5260 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5262 store_reg(get_field(f, r1), o->in2);
5264 #define SPEC_wout_in2_r1 0
5266 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5268 store_reg32_i64(get_field(f, r1), o->in2);
5270 #define SPEC_wout_in2_r1_32 0
5272 /* ====================================================================== */
5273 /* The "INput 1" generators. These load the first operand to an insn. */
5275 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5277 o->in1 = load_reg(get_field(f, r1));
5279 #define SPEC_in1_r1 0
5281 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5283 o->in1 = regs[get_field(f, r1)];
5284 o->g_in1 = true;
5286 #define SPEC_in1_r1_o 0
5288 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5290 o->in1 = tcg_temp_new_i64();
5291 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5293 #define SPEC_in1_r1_32s 0
5295 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5297 o->in1 = tcg_temp_new_i64();
5298 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5300 #define SPEC_in1_r1_32u 0
5302 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5304 o->in1 = tcg_temp_new_i64();
5305 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5307 #define SPEC_in1_r1_sr32 0
5309 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5311 o->in1 = load_reg(get_field(f, r1) + 1);
5313 #define SPEC_in1_r1p1 SPEC_r1_even
5315 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5317 o->in1 = tcg_temp_new_i64();
5318 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5320 #define SPEC_in1_r1p1_32s SPEC_r1_even
5322 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5324 o->in1 = tcg_temp_new_i64();
5325 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5327 #define SPEC_in1_r1p1_32u SPEC_r1_even
5329 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5331 int r1 = get_field(f, r1);
5332 o->in1 = tcg_temp_new_i64();
5333 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5335 #define SPEC_in1_r1_D32 SPEC_r1_even
5337 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5339 o->in1 = load_reg(get_field(f, r2));
5341 #define SPEC_in1_r2 0
5343 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5345 o->in1 = tcg_temp_new_i64();
5346 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5348 #define SPEC_in1_r2_sr32 0
5350 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5352 o->in1 = load_reg(get_field(f, r3));
5354 #define SPEC_in1_r3 0
5356 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5358 o->in1 = regs[get_field(f, r3)];
5359 o->g_in1 = true;
5361 #define SPEC_in1_r3_o 0
5363 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5365 o->in1 = tcg_temp_new_i64();
5366 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5368 #define SPEC_in1_r3_32s 0
5370 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5372 o->in1 = tcg_temp_new_i64();
5373 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5375 #define SPEC_in1_r3_32u 0
5377 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5379 int r3 = get_field(f, r3);
5380 o->in1 = tcg_temp_new_i64();
5381 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5383 #define SPEC_in1_r3_D32 SPEC_r3_even
5385 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5387 o->in1 = load_freg32_i64(get_field(f, r1));
5389 #define SPEC_in1_e1 0
5391 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5393 o->in1 = fregs[get_field(f, r1)];
5394 o->g_in1 = true;
5396 #define SPEC_in1_f1_o 0
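/* Note: for the 128-bit "x1" input below, the operand is handed to the
   operation in out/out2 rather than in1/in2; f128 helpers such as
   op_tcxb read it from there. */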
5398 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5400 int r1 = get_field(f, r1);
5401 o->out = fregs[r1];
5402 o->out2 = fregs[r1 + 2];
5403 o->g_out = o->g_out2 = true;
5405 #define SPEC_in1_x1_o SPEC_r1_f128
5407 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5409 o->in1 = fregs[get_field(f, r3)];
5410 o->g_in1 = true;
5412 #define SPEC_in1_f3_o 0
5414 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5416 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5418 #define SPEC_in1_la1 0
5420 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5422 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5423 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5425 #define SPEC_in1_la2 0
5427 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5429 in1_la1(s, f, o);
5430 o->in1 = tcg_temp_new_i64();
5431 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5433 #define SPEC_in1_m1_8u 0
5435 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5437 in1_la1(s, f, o);
5438 o->in1 = tcg_temp_new_i64();
5439 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5441 #define SPEC_in1_m1_16s 0
5443 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5445 in1_la1(s, f, o);
5446 o->in1 = tcg_temp_new_i64();
5447 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5449 #define SPEC_in1_m1_16u 0
5451 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5453 in1_la1(s, f, o);
5454 o->in1 = tcg_temp_new_i64();
5455 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5457 #define SPEC_in1_m1_32s 0
5459 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5461 in1_la1(s, f, o);
5462 o->in1 = tcg_temp_new_i64();
5463 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5465 #define SPEC_in1_m1_32u 0
5467 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5469 in1_la1(s, f, o);
5470 o->in1 = tcg_temp_new_i64();
5471 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5473 #define SPEC_in1_m1_64 0
5475 /* ====================================================================== */
5476 /* The "INput 2" generators. These load the second operand to an insn. */
5478 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5480 o->in2 = regs[get_field(f, r1)];
5481 o->g_in2 = true;
5483 #define SPEC_in2_r1_o 0
5485 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5487 o->in2 = tcg_temp_new_i64();
5488 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5490 #define SPEC_in2_r1_16u 0
5492 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5494 o->in2 = tcg_temp_new_i64();
5495 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5497 #define SPEC_in2_r1_32u 0
5499 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5501 int r1 = get_field(f, r1);
5502 o->in2 = tcg_temp_new_i64();
5503 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5505 #define SPEC_in2_r1_D32 SPEC_r1_even
5507 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5509 o->in2 = load_reg(get_field(f, r2));
5511 #define SPEC_in2_r2 0
5513 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5515 o->in2 = regs[get_field(f, r2)];
5516 o->g_in2 = true;
5518 #define SPEC_in2_r2_o 0
5520 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5522 int r2 = get_field(f, r2);
5523 if (r2 != 0) {
5524 o->in2 = load_reg(r2);
5527 #define SPEC_in2_r2_nz 0
5529 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5531 o->in2 = tcg_temp_new_i64();
5532 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5534 #define SPEC_in2_r2_8s 0
5536 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5538 o->in2 = tcg_temp_new_i64();
5539 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5541 #define SPEC_in2_r2_8u 0
5543 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5545 o->in2 = tcg_temp_new_i64();
5546 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5548 #define SPEC_in2_r2_16s 0
5550 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5552 o->in2 = tcg_temp_new_i64();
5553 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5555 #define SPEC_in2_r2_16u 0
5557 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5559 o->in2 = load_reg(get_field(f, r3));
5561 #define SPEC_in2_r3 0
5563 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5565 o->in2 = tcg_temp_new_i64();
5566 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5568 #define SPEC_in2_r3_sr32 0
5570 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5572 o->in2 = tcg_temp_new_i64();
5573 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5575 #define SPEC_in2_r2_32s 0
5577 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5579 o->in2 = tcg_temp_new_i64();
5580 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5582 #define SPEC_in2_r2_32u 0
5584 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5586 o->in2 = tcg_temp_new_i64();
5587 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5589 #define SPEC_in2_r2_sr32 0
5591 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5593 o->in2 = load_freg32_i64(get_field(f, r2));
5595 #define SPEC_in2_e2 0
5597 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5599 o->in2 = fregs[get_field(f, r2)];
5600 o->g_in2 = true;
5602 #define SPEC_in2_f2_o 0
5604 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5606 int r2 = get_field(f, r2);
5607 o->in1 = fregs[r2];
5608 o->in2 = fregs[r2 + 2];
5609 o->g_in1 = o->g_in2 = true;
5611 #define SPEC_in2_x2_o SPEC_r2_f128
5613 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5615 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5617 #define SPEC_in2_ra2 0
5619 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5621 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5622 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5624 #define SPEC_in2_a2 0
5626 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5628 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5630 #define SPEC_in2_ri2 0
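/* ri2 forms a PC-relative address: I2 is a signed halfword count from
   the address of the instruction, hence the scaling by 2 above. */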
5632 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5634 help_l2_shift(s, f, o, 31);
5636 #define SPEC_in2_sh32 0
5638 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5640 help_l2_shift(s, f, o, 63);
5642 #define SPEC_in2_sh64 0
5644 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5646 in2_a2(s, f, o);
5647 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5649 #define SPEC_in2_m2_8u 0
5651 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5653 in2_a2(s, f, o);
5654 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5656 #define SPEC_in2_m2_16s 0
5658 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5660 in2_a2(s, f, o);
5661 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5663 #define SPEC_in2_m2_16u 0
5665 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5667 in2_a2(s, f, o);
5668 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5670 #define SPEC_in2_m2_32s 0
5672 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5674 in2_a2(s, f, o);
5675 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5677 #define SPEC_in2_m2_32u 0
5679 #ifndef CONFIG_USER_ONLY
5680 static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
5682 in2_a2(s, f, o);
5683 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5685 #define SPEC_in2_m2_32ua 0
5686 #endif
5688 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5690 in2_a2(s, f, o);
5691 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5693 #define SPEC_in2_m2_64 0
5695 #ifndef CONFIG_USER_ONLY
5696 static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5698 in2_a2(s, f, o);
5699 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5701 #define SPEC_in2_m2_64a 0
5702 #endif
5704 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5706 in2_ri2(s, f, o);
5707 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5709 #define SPEC_in2_mri2_16u 0
5711 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5713 in2_ri2(s, f, o);
5714 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5716 #define SPEC_in2_mri2_32s 0
5718 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5720 in2_ri2(s, f, o);
5721 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5723 #define SPEC_in2_mri2_32u 0
5725 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5727 in2_ri2(s, f, o);
5728 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5730 #define SPEC_in2_mri2_64 0
5732 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5734 o->in2 = tcg_const_i64(get_field(f, i2));
5736 #define SPEC_in2_i2 0
5738 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5740 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5742 #define SPEC_in2_i2_8u 0
5744 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5746 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5748 #define SPEC_in2_i2_16u 0
5750 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5752 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5754 #define SPEC_in2_i2_32u 0
5756 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5758 uint64_t i2 = (uint16_t)get_field(f, i2);
5759 o->in2 = tcg_const_i64(i2 << s->insn->data);
5761 #define SPEC_in2_i2_16u_shl 0
5763 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5765 uint64_t i2 = (uint32_t)get_field(f, i2);
5766 o->in2 = tcg_const_i64(i2 << s->insn->data);
5768 #define SPEC_in2_i2_32u_shl 0
5770 #ifndef CONFIG_USER_ONLY
5771 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5773 o->in2 = tcg_const_i64(s->fields->raw_insn);
5775 #define SPEC_in2_insn 0
5776 #endif
5778 /* ====================================================================== */
5780 /* Find opc within the table of insns. This is formulated as a switch
5781 statement so that (1) we get compile-time notice of cut-paste errors
5782 for duplicated opcodes, and (2) the compiler generates the binary
5783 search tree, rather than us having to post-process the table. */
5785 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5786 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5788 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5790 enum DisasInsnEnum {
5791 #include "insn-data.def"
5794 #undef D
5795 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5796 .opc = OPC, \
5797 .fmt = FMT_##FT, \
5798 .fac = FAC_##FC, \
5799 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5800 .name = #NM, \
5801 .help_in1 = in1_##I1, \
5802 .help_in2 = in2_##I2, \
5803 .help_prep = prep_##P, \
5804 .help_wout = wout_##W, \
5805 .help_cout = cout_##CC, \
5806 .help_op = op_##OP, \
5807 .data = D \
5810 /* Allow 0 to be used for NULL in the table below. */
5811 #define in1_0 NULL
5812 #define in2_0 NULL
5813 #define prep_0 NULL
5814 #define wout_0 NULL
5815 #define cout_0 NULL
5816 #define op_0 NULL
5818 #define SPEC_in1_0 0
5819 #define SPEC_in2_0 0
5820 #define SPEC_prep_0 0
5821 #define SPEC_wout_0 0
5823 /* Give smaller names to the various facilities. */
5824 #define FAC_Z S390_FEAT_ZARCH
5825 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5826 #define FAC_DFP S390_FEAT_DFP
5827 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5828 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5829 #define FAC_EE S390_FEAT_EXECUTE_EXT
5830 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5831 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5832 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5833 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5834 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5835 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5836 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5837 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5838 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5839 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5840 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5841 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5842 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5843 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5844 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5845 #define FAC_SFLE S390_FEAT_STFLE
5846 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5847 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5848 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5849 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5850 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5851 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5852 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5853 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5854 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5855 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5856 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5857 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5858 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5859 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
5860 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
5861 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
5863 static const DisasInsn insn_info[] = {
5864 #include "insn-data.def"
5867 #undef D
5868 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5869 case OPC: return &insn_info[insn_ ## NM];
5871 static const DisasInsn *lookup_opc(uint16_t opc)
5873 switch (opc) {
5874 #include "insn-data.def"
5875 default:
5876 return NULL;
5880 #undef D
5881 #undef C
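/* To illustrate the triple expansion above, an insn-data.def entry
   along the lines of (shown here for illustration only)

       C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)

   would produce, in order:

       insn_AR,                                    -- DisasInsnEnum

       { .opc = 0x1a00, .fmt = FMT_RR_a, .fac = FAC_Z,
         .spec = SPEC_in1_r1 | SPEC_in2_r2_32s
               | SPEC_prep_new | SPEC_wout_r1_32,
         .name = "AR",
         .help_in1 = in1_r1, .help_in2 = in2_r2_32s,
         .help_prep = prep_new, .help_wout = wout_r1_32,
         .help_cout = cout_adds32, .help_op = op_add,
         .data = 0 },                               -- insn_info[]

       case 0x1a00: return &insn_info[insn_AR];     -- lookup_opc
*/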
5883 /* Extract a field from the insn. The INSN should be left-aligned in
5884 the uint64_t so that we can more easily utilize the big-bit-endian
5885 definitions we extract from the Principles of Operation. */
5887 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5889 uint32_t r, m;
5891 if (f->size == 0) {
5892 return;
5895 /* Zero-extract the field from the insn. */
5896 r = (insn << f->beg) >> (64 - f->size);
5898 /* Sign-extend, or un-swap the field as necessary. */
5899 switch (f->type) {
5900 case 0: /* unsigned */
5901 break;
5902 case 1: /* signed */
5903 assert(f->size <= 32);
5904 m = 1u << (f->size - 1);
5905 r = (r ^ m) - m;
5906 break;
5907 case 2: /* dl+dh split, signed 20 bit. */
5908 r = ((int8_t)r << 12) | (r >> 8);
5909 break;
5910 default:
5911 abort();
5914 /* Validate the "compressed" encoding we selected above, i.e. check
5915 that we haven't made two different original fields overlap. */
5916 assert(((o->presentC >> f->indexC) & 1) == 0);
5917 o->presentC |= 1 << f->indexC;
5918 o->presentO |= 1 << f->indexO;
5920 o->c[f->indexC] = r;
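/* For example, with the insn left-aligned as described above, a 4-bit
   register field beginning at instruction bit 8 is extracted as
   r = (insn << 8) >> 60.  In the type-2 case the raw 20-bit field
   arrives as (DL << 8) | DH, and the fixup above rebuilds the
   architected displacement sign_extend(DH) << 12 | DL. */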
5923 /* Look up the insn at the current PC, extracting the operands into O and
5924 returning the info struct for the insn. Returns NULL for an invalid insn. */
5926 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5927 DisasFields *f)
5929 uint64_t insn, pc = s->pc;
5930 int op, op2, ilen;
5931 const DisasInsn *info;
5933 if (unlikely(s->ex_value)) {
5934 /* Drop the EX data now, so that it's clear on exception paths. */
5935 TCGv_i64 zero = tcg_const_i64(0);
5936 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
5937 tcg_temp_free_i64(zero);
5939 /* Extract the values saved by EXECUTE. */
5940 insn = s->ex_value & 0xffffffffffff0000ull;
5941 ilen = s->ex_value & 0xf;
5942 op = insn >> 56;
5943 } else {
5944 insn = ld_code2(env, pc);
5945 op = (insn >> 8) & 0xff;
5946 ilen = get_ilen(op);
5947 switch (ilen) {
5948 case 2:
5949 insn = insn << 48;
5950 break;
5951 case 4:
5952 insn = ld_code4(env, pc) << 32;
5953 break;
5954 case 6:
5955 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5956 break;
5957 default:
5958 g_assert_not_reached();
5961 s->next_pc = s->pc + ilen;
5962 s->ilen = ilen;
5964 /* We can't actually determine the insn format until we've looked up
5965 the full insn opcode, which we can't do without locating the
5966 secondary opcode. Assume by default that OP2 is at bit 40; for
5967 those smaller insns that don't actually have a secondary opcode
5968 this will correctly result in OP2 = 0. */
5969 switch (op) {
5970 case 0x01: /* E */
5971 case 0x80: /* S */
5972 case 0x82: /* S */
5973 case 0x93: /* S */
5974 case 0xb2: /* S, RRF, RRE, IE */
5975 case 0xb3: /* RRE, RRD, RRF */
5976 case 0xb9: /* RRE, RRF */
5977 case 0xe5: /* SSE, SIL */
5978 op2 = (insn << 8) >> 56;
5979 break;
5980 case 0xa5: /* RI */
5981 case 0xa7: /* RI */
5982 case 0xc0: /* RIL */
5983 case 0xc2: /* RIL */
5984 case 0xc4: /* RIL */
5985 case 0xc6: /* RIL */
5986 case 0xc8: /* SSF */
5987 case 0xcc: /* RIL */
5988 op2 = (insn << 12) >> 60;
5989 break;
5990 case 0xc5: /* MII */
5991 case 0xc7: /* SMI */
5992 case 0xd0 ... 0xdf: /* SS */
5993 case 0xe1: /* SS */
5994 case 0xe2: /* SS */
5995 case 0xe8: /* SS */
5996 case 0xe9: /* SS */
5997 case 0xea: /* SS */
5998 case 0xee ... 0xf3: /* SS */
5999 case 0xf8 ... 0xfd: /* SS */
6000 op2 = 0;
6001 break;
6002 default:
6003 op2 = (insn << 40) >> 56;
6004 break;
6007 memset(f, 0, sizeof(*f));
6008 f->raw_insn = insn;
6009 f->op = op;
6010 f->op2 = op2;
6012 /* Look up the instruction. */
6013 info = lookup_opc(op << 8 | op2);
6015 /* If we found it, extract the operands. */
6016 if (info != NULL) {
6017 DisasFormat fmt = info->fmt;
6018 int i;
6020 for (i = 0; i < NUM_C_FIELD; ++i) {
6021 extract_field(f, &format_info[fmt].op[i], insn);
6024 return info;
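/* Two notes on the decode above.  For an ordinary b2xx opcode
   (S/RRE/RRF formats), op2 is bits 8-15 of the left-aligned insn, so an
   insn word beginning 0xb205 resolves via lookup_opc(0xb205).  On the
   EXECUTE path, ex_value carries the target insn left-aligned in its
   upper bytes and the insn length in its low four bits, which is why
   insn and ilen are unpacked with masks instead of being re-read from
   memory. */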
6027 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6029 const DisasInsn *insn;
6030 DisasJumpType ret = DISAS_NEXT;
6031 DisasFields f;
6032 DisasOps o;
6034 /* Search for the insn in the table. */
6035 insn = extract_insn(env, s, &f);
6037 /* Not found means unimplemented/illegal opcode. */
6038 if (insn == NULL) {
6039 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6040 f.op, f.op2);
6041 gen_illegal_opcode(s);
6042 return DISAS_NORETURN;
6045 #ifndef CONFIG_USER_ONLY
6046 if (s->tb->flags & FLAG_MASK_PER) {
6047 TCGv_i64 addr = tcg_const_i64(s->pc);
6048 gen_helper_per_ifetch(cpu_env, addr);
6049 tcg_temp_free_i64(addr);
6051 #endif
6053 /* Check for insn specification exceptions. */
6054 if (insn->spec) {
6055 int spec = insn->spec, excp = 0, r;
6057 if (spec & SPEC_r1_even) {
6058 r = get_field(&f, r1);
6059 if (r & 1) {
6060 excp = PGM_SPECIFICATION;
6063 if (spec & SPEC_r2_even) {
6064 r = get_field(&f, r2);
6065 if (r & 1) {
6066 excp = PGM_SPECIFICATION;
6069 if (spec & SPEC_r3_even) {
6070 r = get_field(&f, r3);
6071 if (r & 1) {
6072 excp = PGM_SPECIFICATION;
6075 if (spec & SPEC_r1_f128) {
6076 r = get_field(&f, r1);
6077 if (r > 13) {
6078 excp = PGM_SPECIFICATION;
6081 if (spec & SPEC_r2_f128) {
6082 r = get_field(&f, r2);
6083 if (r > 13) {
6084 excp = PGM_SPECIFICATION;
6087 if (excp) {
6088 gen_program_exception(s, excp);
6089 return DISAS_NORETURN;
6093 /* Set up the structures we use to communicate with the helpers. */
6094 s->insn = insn;
6095 s->fields = &f;
6096 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
6097 o.out = NULL;
6098 o.out2 = NULL;
6099 o.in1 = NULL;
6100 o.in2 = NULL;
6101 o.addr1 = NULL;
6103 /* Implement the instruction. */
6104 if (insn->help_in1) {
6105 insn->help_in1(s, &f, &o);
6107 if (insn->help_in2) {
6108 insn->help_in2(s, &f, &o);
6110 if (insn->help_prep) {
6111 insn->help_prep(s, &f, &o);
6113 if (insn->help_op) {
6114 ret = insn->help_op(s, &o);
6116 if (insn->help_wout) {
6117 insn->help_wout(s, &f, &o);
6119 if (insn->help_cout) {
6120 insn->help_cout(s, &o);
6123 /* Free any temporaries created by the helpers. */
6124 if (o.out && !o.g_out) {
6125 tcg_temp_free_i64(o.out);
6127 if (o.out2 && !o.g_out2) {
6128 tcg_temp_free_i64(o.out2);
6130 if (o.in1 && !o.g_in1) {
6131 tcg_temp_free_i64(o.in1);
6133 if (o.in2 && !o.g_in2) {
6134 tcg_temp_free_i64(o.in2);
6136 if (o.addr1) {
6137 tcg_temp_free_i64(o.addr1);
6140 #ifndef CONFIG_USER_ONLY
6141 if (s->tb->flags & FLAG_MASK_PER) {
6142 /* An exception might be triggered; save the PSW if not already done. */
6143 if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6144 tcg_gen_movi_i64(psw_addr, s->next_pc);
6147 /* Call the helper to check for a possible PER exception. */
6148 gen_helper_per_check_exception(cpu_env);
6150 #endif
6152 /* Advance to the next instruction. */
6153 s->pc = s->next_pc;
6154 return ret;
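/* Putting the pieces together for the AR-style entry sketched above:
   translate_one would run in1_r1 (load GR r1), in2_r2_32s (sign-extend
   GR r2), prep_new (allocate the output temporary), op_add, wout_r1_32
   (write the low 32 bits back to GR r1) and cout_adds32 (latch the
   operands for lazy CC computation), then free whichever temporaries
   are not TCG globals. */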
6157 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
6159 CPUS390XState *env = cs->env_ptr;
6160 DisasContext dc;
6161 target_ulong pc_start;
6162 uint64_t page_start;
6163 int num_insns, max_insns;
6164 DisasJumpType status;
6165 bool do_debug;
6167 pc_start = tb->pc;
6169 /* 31-bit mode */
6170 if (!(tb->flags & FLAG_MASK_64)) {
6171 pc_start &= 0x7fffffff;
6174 dc.tb = tb;
6175 dc.pc = pc_start;
6176 dc.cc_op = CC_OP_DYNAMIC;
6177 dc.ex_value = tb->cs_base;
6178 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
6180 page_start = pc_start & TARGET_PAGE_MASK;
6182 num_insns = 0;
6183 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
6184 if (max_insns == 0) {
6185 max_insns = CF_COUNT_MASK;
6187 if (max_insns > TCG_MAX_INSNS) {
6188 max_insns = TCG_MAX_INSNS;
6191 gen_tb_start(tb);
6193 do {
6194 tcg_gen_insn_start(dc.pc, dc.cc_op);
6195 num_insns++;
6197 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
6198 status = DISAS_PC_STALE;
6199 do_debug = true;
6200 /* The address covered by the breakpoint must be included in
6201 [tb->pc, tb->pc + tb->size) in order for it to be
6202 properly cleared -- thus we increment the PC here so that
6203 the logic setting tb->size below does the right thing. */
6204 dc.pc += 2;
6205 break;
6208 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
6209 gen_io_start();
6212 status = translate_one(env, &dc);
6214 /* If we reach a page boundary, are single stepping,
6215 or exhaust instruction count, stop generation. */
6216 if (status == DISAS_NEXT
6217 && (dc.pc - page_start >= TARGET_PAGE_SIZE
6218 || tcg_op_buf_full()
6219 || num_insns >= max_insns
6220 || singlestep
6221 || cs->singlestep_enabled
6222 || dc.ex_value)) {
6223 status = DISAS_TOO_MANY;
6225 } while (status == DISAS_NEXT);
6227 if (tb_cflags(tb) & CF_LAST_IO) {
6228 gen_io_end();
6231 switch (status) {
6232 case DISAS_GOTO_TB:
6233 case DISAS_NORETURN:
6234 break;
6235 case DISAS_TOO_MANY:
6236 case DISAS_PC_STALE:
6237 case DISAS_PC_STALE_NOCHAIN:
6238 update_psw_addr(&dc);
6239 /* FALLTHRU */
6240 case DISAS_PC_UPDATED:
6241 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6242 cc op type is in env */
6243 update_cc_op(&dc);
6244 /* FALLTHRU */
6245 case DISAS_PC_CC_UPDATED:
6246 /* Exit the TB, either by raising a debug exception or by return. */
6247 if (do_debug) {
6248 gen_exception(EXCP_DEBUG);
6249 } else if (use_exit_tb(&dc) || status == DISAS_PC_STALE_NOCHAIN) {
6250 tcg_gen_exit_tb(0);
6251 } else {
6252 tcg_gen_lookup_and_goto_ptr();
6254 break;
6255 default:
6256 g_assert_not_reached();
6259 gen_tb_end(tb, num_insns);
6261 tb->size = dc.pc - pc_start;
6262 tb->icount = num_insns;
6264 #if defined(S390X_DEBUG_DISAS)
6265 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
6266 && qemu_log_in_addr_range(pc_start)) {
6267 qemu_log_lock();
6268 if (unlikely(dc.ex_value)) {
6269 /* ??? Unfortunately log_target_disas can't use host memory. */
6270 qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
6271 } else {
6272 qemu_log("IN: %s\n", lookup_symbol(pc_start));
6273 log_target_disas(cs, pc_start, dc.pc - pc_start);
6274 qemu_log("\n");
6276 qemu_log_unlock();
6278 #endif
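/* The data[] layout below mirrors the tcg_gen_insn_start(dc.pc,
   dc.cc_op) call above: data[0] is the PSW address and data[1] the
   cc_op at the start of the insn.  CC_OP_DYNAMIC and CC_OP_STATIC are
   deliberately not written back, since in those cases env->cc_op
   already holds the correct runtime value and must not be overwritten
   with a translation-time marker. */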
6281 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6282 target_ulong *data)
6284 int cc_op = data[1];
6285 env->psw.addr = data[0];
6286 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6287 env->cc_op = cc_op;