/*
 * QEMU s390x instruction translation (target/s390x/translate.c).
 * Captured at commit "s390x/tcg: fix disabling/enabling DAT",
 * blob 5aea3bbca62cdf82eaac77f6202b6afd88a1653f.
 */
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
45 #include "exec/log.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t ex_value;
58 uint64_t pc, next_pc;
59 uint32_t ilen;
60 enum cc_op cc_op;
61 bool singlestep_enabled;
64 /* Information carried about a condition to be evaluated. */
65 typedef struct {
66 TCGCond cond:8;
67 bool is_64;
68 bool g1;
69 bool g2;
70 union {
71 struct { TCGv_i64 a, b; } s64;
72 struct { TCGv_i32 a, b; } s32;
73 } u;
74 } DisasCompare;
76 /* is_jmp field values */
77 #define DISAS_EXCP DISAS_TARGET_0
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit[CC_OP_MAX];
81 static uint64_t inline_branch_miss[CC_OP_MAX];
82 #endif
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
91 return pc;
94 static TCGv_i64 psw_addr;
95 static TCGv_i64 psw_mask;
96 static TCGv_i64 gbea;
98 static TCGv_i32 cc_op;
99 static TCGv_i64 cc_src;
100 static TCGv_i64 cc_dst;
101 static TCGv_i64 cc_vr;
103 static char cpu_reg_names[32][4];
104 static TCGv_i64 regs[16];
105 static TCGv_i64 fregs[16];
107 void s390x_translate_init(void)
109 int i;
111 psw_addr = tcg_global_mem_new_i64(cpu_env,
112 offsetof(CPUS390XState, psw.addr),
113 "psw_addr");
114 psw_mask = tcg_global_mem_new_i64(cpu_env,
115 offsetof(CPUS390XState, psw.mask),
116 "psw_mask");
117 gbea = tcg_global_mem_new_i64(cpu_env,
118 offsetof(CPUS390XState, gbea),
119 "gbea");
121 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
122 "cc_op");
123 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
124 "cc_src");
125 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
126 "cc_dst");
127 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
128 "cc_vr");
130 for (i = 0; i < 16; i++) {
131 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
132 regs[i] = tcg_global_mem_new(cpu_env,
133 offsetof(CPUS390XState, regs[i]),
134 cpu_reg_names[i]);
137 for (i = 0; i < 16; i++) {
138 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
139 fregs[i] = tcg_global_mem_new(cpu_env,
140 offsetof(CPUS390XState, vregs[i][0].d),
141 cpu_reg_names[i + 16]);
145 static TCGv_i64 load_reg(int reg)
147 TCGv_i64 r = tcg_temp_new_i64();
148 tcg_gen_mov_i64(r, regs[reg]);
149 return r;
152 static TCGv_i64 load_freg32_i64(int reg)
154 TCGv_i64 r = tcg_temp_new_i64();
155 tcg_gen_shri_i64(r, fregs[reg], 32);
156 return r;
159 static void store_reg(int reg, TCGv_i64 v)
161 tcg_gen_mov_i64(regs[reg], v);
164 static void store_freg(int reg, TCGv_i64 v)
166 tcg_gen_mov_i64(fregs[reg], v);
169 static void store_reg32_i64(int reg, TCGv_i64 v)
171 /* 32 bit register writes keep the upper half */
172 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
175 static void store_reg32h_i64(int reg, TCGv_i64 v)
177 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
180 static void store_freg32_i64(int reg, TCGv_i64 v)
182 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
185 static void return_low128(TCGv_i64 dest)
187 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
190 static void update_psw_addr(DisasContext *s)
192 /* psw.addr */
193 tcg_gen_movi_i64(psw_addr, s->pc);
196 static void per_branch(DisasContext *s, bool to_next)
198 #ifndef CONFIG_USER_ONLY
199 tcg_gen_movi_i64(gbea, s->pc);
201 if (s->tb->flags & FLAG_MASK_PER) {
202 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
203 gen_helper_per_branch(cpu_env, gbea, next_pc);
204 if (to_next) {
205 tcg_temp_free_i64(next_pc);
208 #endif
211 static void per_branch_cond(DisasContext *s, TCGCond cond,
212 TCGv_i64 arg1, TCGv_i64 arg2)
214 #ifndef CONFIG_USER_ONLY
215 if (s->tb->flags & FLAG_MASK_PER) {
216 TCGLabel *lab = gen_new_label();
217 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
219 tcg_gen_movi_i64(gbea, s->pc);
220 gen_helper_per_branch(cpu_env, gbea, psw_addr);
222 gen_set_label(lab);
223 } else {
224 TCGv_i64 pc = tcg_const_i64(s->pc);
225 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
226 tcg_temp_free_i64(pc);
228 #endif
231 static void per_breaking_event(DisasContext *s)
233 tcg_gen_movi_i64(gbea, s->pc);
236 static void update_cc_op(DisasContext *s)
238 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
239 tcg_gen_movi_i32(cc_op, s->cc_op);
243 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
245 return (uint64_t)cpu_lduw_code(env, pc);
248 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
250 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
253 static int get_mem_index(DisasContext *s)
255 if (!(s->tb->flags & FLAG_MASK_DAT)) {
256 return MMU_REAL_IDX;
259 switch (s->tb->flags & FLAG_MASK_ASC) {
260 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
261 return MMU_PRIMARY_IDX;
262 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
263 return MMU_SECONDARY_IDX;
264 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
265 return MMU_HOME_IDX;
266 default:
267 tcg_abort();
268 break;
272 static void gen_exception(int excp)
274 TCGv_i32 tmp = tcg_const_i32(excp);
275 gen_helper_exception(cpu_env, tmp);
276 tcg_temp_free_i32(tmp);
279 static void gen_program_exception(DisasContext *s, int code)
281 TCGv_i32 tmp;
283 /* Remember what pgm exeption this was. */
284 tmp = tcg_const_i32(code);
285 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
286 tcg_temp_free_i32(tmp);
288 tmp = tcg_const_i32(s->ilen);
289 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
290 tcg_temp_free_i32(tmp);
292 /* update the psw */
293 update_psw_addr(s);
295 /* Save off cc. */
296 update_cc_op(s);
298 /* Trigger exception. */
299 gen_exception(EXCP_PGM);
302 static inline void gen_illegal_opcode(DisasContext *s)
304 gen_program_exception(s, PGM_OPERATION);
307 static inline void gen_trap(DisasContext *s)
309 TCGv_i32 t;
311 /* Set DXC to 0xff. */
312 t = tcg_temp_new_i32();
313 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
314 tcg_gen_ori_i32(t, t, 0xff00);
315 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
316 tcg_temp_free_i32(t);
318 gen_program_exception(s, PGM_DATA);
321 #ifndef CONFIG_USER_ONLY
322 static void check_privileged(DisasContext *s)
324 if (s->tb->flags & FLAG_MASK_PSTATE) {
325 gen_program_exception(s, PGM_PRIVILEGED);
328 #endif
330 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
332 TCGv_i64 tmp = tcg_temp_new_i64();
333 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
335 /* Note that d2 is limited to 20 bits, signed. If we crop negative
336 displacements early we create larger immedate addends. */
338 /* Note that addi optimizes the imm==0 case. */
339 if (b2 && x2) {
340 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
341 tcg_gen_addi_i64(tmp, tmp, d2);
342 } else if (b2) {
343 tcg_gen_addi_i64(tmp, regs[b2], d2);
344 } else if (x2) {
345 tcg_gen_addi_i64(tmp, regs[x2], d2);
346 } else {
347 if (need_31) {
348 d2 &= 0x7fffffff;
349 need_31 = false;
351 tcg_gen_movi_i64(tmp, d2);
353 if (need_31) {
354 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
357 return tmp;
360 static inline bool live_cc_data(DisasContext *s)
362 return (s->cc_op != CC_OP_DYNAMIC
363 && s->cc_op != CC_OP_STATIC
364 && s->cc_op > 3);
367 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
369 if (live_cc_data(s)) {
370 tcg_gen_discard_i64(cc_src);
371 tcg_gen_discard_i64(cc_dst);
372 tcg_gen_discard_i64(cc_vr);
374 s->cc_op = CC_OP_CONST0 + val;
377 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
379 if (live_cc_data(s)) {
380 tcg_gen_discard_i64(cc_src);
381 tcg_gen_discard_i64(cc_vr);
383 tcg_gen_mov_i64(cc_dst, dst);
384 s->cc_op = op;
387 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
388 TCGv_i64 dst)
390 if (live_cc_data(s)) {
391 tcg_gen_discard_i64(cc_vr);
393 tcg_gen_mov_i64(cc_src, src);
394 tcg_gen_mov_i64(cc_dst, dst);
395 s->cc_op = op;
398 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
399 TCGv_i64 dst, TCGv_i64 vr)
401 tcg_gen_mov_i64(cc_src, src);
402 tcg_gen_mov_i64(cc_dst, dst);
403 tcg_gen_mov_i64(cc_vr, vr);
404 s->cc_op = op;
407 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
409 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
412 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
414 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
417 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
419 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
422 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
424 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
427 /* CC value is in env->cc_op */
428 static void set_cc_static(DisasContext *s)
430 if (live_cc_data(s)) {
431 tcg_gen_discard_i64(cc_src);
432 tcg_gen_discard_i64(cc_dst);
433 tcg_gen_discard_i64(cc_vr);
435 s->cc_op = CC_OP_STATIC;
438 /* calculates cc into cc_op */
439 static void gen_op_calc_cc(DisasContext *s)
441 TCGv_i32 local_cc_op = NULL;
442 TCGv_i64 dummy = NULL;
444 switch (s->cc_op) {
445 default:
446 dummy = tcg_const_i64(0);
447 /* FALLTHRU */
448 case CC_OP_ADD_64:
449 case CC_OP_ADDU_64:
450 case CC_OP_ADDC_64:
451 case CC_OP_SUB_64:
452 case CC_OP_SUBU_64:
453 case CC_OP_SUBB_64:
454 case CC_OP_ADD_32:
455 case CC_OP_ADDU_32:
456 case CC_OP_ADDC_32:
457 case CC_OP_SUB_32:
458 case CC_OP_SUBU_32:
459 case CC_OP_SUBB_32:
460 local_cc_op = tcg_const_i32(s->cc_op);
461 break;
462 case CC_OP_CONST0:
463 case CC_OP_CONST1:
464 case CC_OP_CONST2:
465 case CC_OP_CONST3:
466 case CC_OP_STATIC:
467 case CC_OP_DYNAMIC:
468 break;
471 switch (s->cc_op) {
472 case CC_OP_CONST0:
473 case CC_OP_CONST1:
474 case CC_OP_CONST2:
475 case CC_OP_CONST3:
476 /* s->cc_op is the cc value */
477 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
478 break;
479 case CC_OP_STATIC:
480 /* env->cc_op already is the cc value */
481 break;
482 case CC_OP_NZ:
483 case CC_OP_ABS_64:
484 case CC_OP_NABS_64:
485 case CC_OP_ABS_32:
486 case CC_OP_NABS_32:
487 case CC_OP_LTGT0_32:
488 case CC_OP_LTGT0_64:
489 case CC_OP_COMP_32:
490 case CC_OP_COMP_64:
491 case CC_OP_NZ_F32:
492 case CC_OP_NZ_F64:
493 case CC_OP_FLOGR:
494 /* 1 argument */
495 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
496 break;
497 case CC_OP_ICM:
498 case CC_OP_LTGT_32:
499 case CC_OP_LTGT_64:
500 case CC_OP_LTUGTU_32:
501 case CC_OP_LTUGTU_64:
502 case CC_OP_TM_32:
503 case CC_OP_TM_64:
504 case CC_OP_SLA_32:
505 case CC_OP_SLA_64:
506 case CC_OP_NZ_F128:
507 /* 2 arguments */
508 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
509 break;
510 case CC_OP_ADD_64:
511 case CC_OP_ADDU_64:
512 case CC_OP_ADDC_64:
513 case CC_OP_SUB_64:
514 case CC_OP_SUBU_64:
515 case CC_OP_SUBB_64:
516 case CC_OP_ADD_32:
517 case CC_OP_ADDU_32:
518 case CC_OP_ADDC_32:
519 case CC_OP_SUB_32:
520 case CC_OP_SUBU_32:
521 case CC_OP_SUBB_32:
522 /* 3 arguments */
523 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
524 break;
525 case CC_OP_DYNAMIC:
526 /* unknown operation - assume 3 arguments and cc_op in env */
527 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
528 break;
529 default:
530 tcg_abort();
533 if (local_cc_op) {
534 tcg_temp_free_i32(local_cc_op);
536 if (dummy) {
537 tcg_temp_free_i64(dummy);
540 /* We now have cc in cc_op as constant */
541 set_cc_static(s);
544 static bool use_exit_tb(DisasContext *s)
546 return (s->singlestep_enabled ||
547 (tb_cflags(s->tb) & CF_LAST_IO) ||
548 (s->tb->flags & FLAG_MASK_PER));
551 static bool use_goto_tb(DisasContext *s, uint64_t dest)
553 if (unlikely(use_exit_tb(s))) {
554 return false;
556 #ifndef CONFIG_USER_ONLY
557 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
558 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
559 #else
560 return true;
561 #endif
564 static void account_noninline_branch(DisasContext *s, int cc_op)
566 #ifdef DEBUG_INLINE_BRANCHES
567 inline_branch_miss[cc_op]++;
568 #endif
571 static void account_inline_branch(DisasContext *s, int cc_op)
573 #ifdef DEBUG_INLINE_BRANCHES
574 inline_branch_hit[cc_op]++;
575 #endif
578 /* Table of mask values to comparison codes, given a comparison as input.
579 For such, CC=3 should not be possible. */
580 static const TCGCond ltgt_cond[16] = {
581 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
582 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
583 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
584 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
585 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
586 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
587 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
588 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
591 /* Table of mask values to comparison codes, given a logic op as input.
592 For such, only CC=0 and CC=1 should be possible. */
593 static const TCGCond nz_cond[16] = {
594 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
595 TCG_COND_NEVER, TCG_COND_NEVER,
596 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
597 TCG_COND_NE, TCG_COND_NE,
598 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
599 TCG_COND_EQ, TCG_COND_EQ,
600 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
601 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
604 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
605 details required to generate a TCG comparison. */
606 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
608 TCGCond cond;
609 enum cc_op old_cc_op = s->cc_op;
611 if (mask == 15 || mask == 0) {
612 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
613 c->u.s32.a = cc_op;
614 c->u.s32.b = cc_op;
615 c->g1 = c->g2 = true;
616 c->is_64 = false;
617 return;
620 /* Find the TCG condition for the mask + cc op. */
621 switch (old_cc_op) {
622 case CC_OP_LTGT0_32:
623 case CC_OP_LTGT0_64:
624 case CC_OP_LTGT_32:
625 case CC_OP_LTGT_64:
626 cond = ltgt_cond[mask];
627 if (cond == TCG_COND_NEVER) {
628 goto do_dynamic;
630 account_inline_branch(s, old_cc_op);
631 break;
633 case CC_OP_LTUGTU_32:
634 case CC_OP_LTUGTU_64:
635 cond = tcg_unsigned_cond(ltgt_cond[mask]);
636 if (cond == TCG_COND_NEVER) {
637 goto do_dynamic;
639 account_inline_branch(s, old_cc_op);
640 break;
642 case CC_OP_NZ:
643 cond = nz_cond[mask];
644 if (cond == TCG_COND_NEVER) {
645 goto do_dynamic;
647 account_inline_branch(s, old_cc_op);
648 break;
650 case CC_OP_TM_32:
651 case CC_OP_TM_64:
652 switch (mask) {
653 case 8:
654 cond = TCG_COND_EQ;
655 break;
656 case 4 | 2 | 1:
657 cond = TCG_COND_NE;
658 break;
659 default:
660 goto do_dynamic;
662 account_inline_branch(s, old_cc_op);
663 break;
665 case CC_OP_ICM:
666 switch (mask) {
667 case 8:
668 cond = TCG_COND_EQ;
669 break;
670 case 4 | 2 | 1:
671 case 4 | 2:
672 cond = TCG_COND_NE;
673 break;
674 default:
675 goto do_dynamic;
677 account_inline_branch(s, old_cc_op);
678 break;
680 case CC_OP_FLOGR:
681 switch (mask & 0xa) {
682 case 8: /* src == 0 -> no one bit found */
683 cond = TCG_COND_EQ;
684 break;
685 case 2: /* src != 0 -> one bit found */
686 cond = TCG_COND_NE;
687 break;
688 default:
689 goto do_dynamic;
691 account_inline_branch(s, old_cc_op);
692 break;
694 case CC_OP_ADDU_32:
695 case CC_OP_ADDU_64:
696 switch (mask) {
697 case 8 | 2: /* vr == 0 */
698 cond = TCG_COND_EQ;
699 break;
700 case 4 | 1: /* vr != 0 */
701 cond = TCG_COND_NE;
702 break;
703 case 8 | 4: /* no carry -> vr >= src */
704 cond = TCG_COND_GEU;
705 break;
706 case 2 | 1: /* carry -> vr < src */
707 cond = TCG_COND_LTU;
708 break;
709 default:
710 goto do_dynamic;
712 account_inline_branch(s, old_cc_op);
713 break;
715 case CC_OP_SUBU_32:
716 case CC_OP_SUBU_64:
717 /* Note that CC=0 is impossible; treat it as dont-care. */
718 switch (mask & 7) {
719 case 2: /* zero -> op1 == op2 */
720 cond = TCG_COND_EQ;
721 break;
722 case 4 | 1: /* !zero -> op1 != op2 */
723 cond = TCG_COND_NE;
724 break;
725 case 4: /* borrow (!carry) -> op1 < op2 */
726 cond = TCG_COND_LTU;
727 break;
728 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
729 cond = TCG_COND_GEU;
730 break;
731 default:
732 goto do_dynamic;
734 account_inline_branch(s, old_cc_op);
735 break;
737 default:
738 do_dynamic:
739 /* Calculate cc value. */
740 gen_op_calc_cc(s);
741 /* FALLTHRU */
743 case CC_OP_STATIC:
744 /* Jump based on CC. We'll load up the real cond below;
745 the assignment here merely avoids a compiler warning. */
746 account_noninline_branch(s, old_cc_op);
747 old_cc_op = CC_OP_STATIC;
748 cond = TCG_COND_NEVER;
749 break;
752 /* Load up the arguments of the comparison. */
753 c->is_64 = true;
754 c->g1 = c->g2 = false;
755 switch (old_cc_op) {
756 case CC_OP_LTGT0_32:
757 c->is_64 = false;
758 c->u.s32.a = tcg_temp_new_i32();
759 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
760 c->u.s32.b = tcg_const_i32(0);
761 break;
762 case CC_OP_LTGT_32:
763 case CC_OP_LTUGTU_32:
764 case CC_OP_SUBU_32:
765 c->is_64 = false;
766 c->u.s32.a = tcg_temp_new_i32();
767 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
768 c->u.s32.b = tcg_temp_new_i32();
769 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
770 break;
772 case CC_OP_LTGT0_64:
773 case CC_OP_NZ:
774 case CC_OP_FLOGR:
775 c->u.s64.a = cc_dst;
776 c->u.s64.b = tcg_const_i64(0);
777 c->g1 = true;
778 break;
779 case CC_OP_LTGT_64:
780 case CC_OP_LTUGTU_64:
781 case CC_OP_SUBU_64:
782 c->u.s64.a = cc_src;
783 c->u.s64.b = cc_dst;
784 c->g1 = c->g2 = true;
785 break;
787 case CC_OP_TM_32:
788 case CC_OP_TM_64:
789 case CC_OP_ICM:
790 c->u.s64.a = tcg_temp_new_i64();
791 c->u.s64.b = tcg_const_i64(0);
792 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
793 break;
795 case CC_OP_ADDU_32:
796 c->is_64 = false;
797 c->u.s32.a = tcg_temp_new_i32();
798 c->u.s32.b = tcg_temp_new_i32();
799 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
800 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
801 tcg_gen_movi_i32(c->u.s32.b, 0);
802 } else {
803 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
805 break;
807 case CC_OP_ADDU_64:
808 c->u.s64.a = cc_vr;
809 c->g1 = true;
810 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
811 c->u.s64.b = tcg_const_i64(0);
812 } else {
813 c->u.s64.b = cc_src;
814 c->g2 = true;
816 break;
818 case CC_OP_STATIC:
819 c->is_64 = false;
820 c->u.s32.a = cc_op;
821 c->g1 = true;
822 switch (mask) {
823 case 0x8 | 0x4 | 0x2: /* cc != 3 */
824 cond = TCG_COND_NE;
825 c->u.s32.b = tcg_const_i32(3);
826 break;
827 case 0x8 | 0x4 | 0x1: /* cc != 2 */
828 cond = TCG_COND_NE;
829 c->u.s32.b = tcg_const_i32(2);
830 break;
831 case 0x8 | 0x2 | 0x1: /* cc != 1 */
832 cond = TCG_COND_NE;
833 c->u.s32.b = tcg_const_i32(1);
834 break;
835 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
836 cond = TCG_COND_EQ;
837 c->g1 = false;
838 c->u.s32.a = tcg_temp_new_i32();
839 c->u.s32.b = tcg_const_i32(0);
840 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
841 break;
842 case 0x8 | 0x4: /* cc < 2 */
843 cond = TCG_COND_LTU;
844 c->u.s32.b = tcg_const_i32(2);
845 break;
846 case 0x8: /* cc == 0 */
847 cond = TCG_COND_EQ;
848 c->u.s32.b = tcg_const_i32(0);
849 break;
850 case 0x4 | 0x2 | 0x1: /* cc != 0 */
851 cond = TCG_COND_NE;
852 c->u.s32.b = tcg_const_i32(0);
853 break;
854 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
855 cond = TCG_COND_NE;
856 c->g1 = false;
857 c->u.s32.a = tcg_temp_new_i32();
858 c->u.s32.b = tcg_const_i32(0);
859 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
860 break;
861 case 0x4: /* cc == 1 */
862 cond = TCG_COND_EQ;
863 c->u.s32.b = tcg_const_i32(1);
864 break;
865 case 0x2 | 0x1: /* cc > 1 */
866 cond = TCG_COND_GTU;
867 c->u.s32.b = tcg_const_i32(1);
868 break;
869 case 0x2: /* cc == 2 */
870 cond = TCG_COND_EQ;
871 c->u.s32.b = tcg_const_i32(2);
872 break;
873 case 0x1: /* cc == 3 */
874 cond = TCG_COND_EQ;
875 c->u.s32.b = tcg_const_i32(3);
876 break;
877 default:
878 /* CC is masked by something else: (8 >> cc) & mask. */
879 cond = TCG_COND_NE;
880 c->g1 = false;
881 c->u.s32.a = tcg_const_i32(8);
882 c->u.s32.b = tcg_const_i32(0);
883 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
884 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
885 break;
887 break;
889 default:
890 abort();
892 c->cond = cond;
895 static void free_compare(DisasCompare *c)
897 if (!c->g1) {
898 if (c->is_64) {
899 tcg_temp_free_i64(c->u.s64.a);
900 } else {
901 tcg_temp_free_i32(c->u.s32.a);
904 if (!c->g2) {
905 if (c->is_64) {
906 tcg_temp_free_i64(c->u.s64.b);
907 } else {
908 tcg_temp_free_i32(c->u.s32.b);
913 /* ====================================================================== */
914 /* Define the insn format enumeration. */
915 #define F0(N) FMT_##N,
916 #define F1(N, X1) F0(N)
917 #define F2(N, X1, X2) F0(N)
918 #define F3(N, X1, X2, X3) F0(N)
919 #define F4(N, X1, X2, X3, X4) F0(N)
920 #define F5(N, X1, X2, X3, X4, X5) F0(N)
922 typedef enum {
923 #include "insn-format.def"
924 } DisasFormat;
926 #undef F0
927 #undef F1
928 #undef F2
929 #undef F3
930 #undef F4
931 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact field indices: fields that never coexist share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
993 struct DisasFields {
994 uint64_t raw_insn;
995 unsigned op:8;
996 unsigned op2:8;
997 unsigned presentC:16;
998 unsigned int presentO;
999 int c[NUM_C_FIELD];
1002 /* This is the way fields are to be accessed out of DisasFields. */
1003 #define have_field(S, F) have_field1((S), FLD_O_##F)
1004 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1006 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1008 return (f->presentO >> c) & 1;
1011 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1012 enum DisasFieldIndexC c)
1014 assert(have_field1(f, o));
1015 return f->c[c];
1018 /* Describe the layout of each field in each format. */
1019 typedef struct DisasField {
1020 unsigned int beg:8;
1021 unsigned int size:8;
1022 unsigned int type:2;
1023 unsigned int indexC:6;
1024 enum DisasFieldIndexO indexO:8;
1025 } DisasField;
1027 typedef struct DisasFormatInfo {
1028 DisasField op[NUM_C_FIELD];
1029 } DisasFormatInfo;
1031 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1032 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1033 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1035 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1036 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1037 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1038 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1039 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1040 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1041 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1042 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1043 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1044 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1046 #define F0(N) { { } },
1047 #define F1(N, X1) { { X1 } },
1048 #define F2(N, X1, X2) { { X1, X2 } },
1049 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1050 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1051 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1053 static const DisasFormatInfo format_info[] = {
1054 #include "insn-format.def"
1057 #undef F0
1058 #undef F1
1059 #undef F2
1060 #undef F3
1061 #undef F4
1062 #undef F5
1063 #undef R
1064 #undef M
1065 #undef BD
1066 #undef BXD
1067 #undef BDL
1068 #undef BXDL
1069 #undef I
1070 #undef L
1072 /* Generally, we'll extract operands into this structures, operate upon
1073 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1074 of routines below for more details. */
1075 typedef struct {
1076 bool g_out, g_out2, g_in1, g_in2;
1077 TCGv_i64 out, out2, in1, in2;
1078 TCGv_i64 addr1;
1079 } DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
1115 struct DisasInsn {
1116 unsigned opc:16;
1117 DisasFormat fmt:8;
1118 unsigned fac:8;
1119 unsigned spec:8;
1121 const char *name;
1123 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1124 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1125 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1126 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1127 void (*help_cout)(DisasContext *, DisasOps *);
1128 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1130 uint64_t data;
1133 /* ====================================================================== */
1134 /* Miscellaneous helpers, used by several operations. */
1136 static void help_l2_shift(DisasContext *s, DisasFields *f,
1137 DisasOps *o, int mask)
1139 int b2 = get_field(f, b2);
1140 int d2 = get_field(f, d2);
1142 if (b2 == 0) {
1143 o->in2 = tcg_const_i64(d2 & mask);
1144 } else {
1145 o->in2 = get_address(s, 0, b2, d2);
1146 tcg_gen_andi_i64(o->in2, o->in2, mask);
1150 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1152 if (dest == s->next_pc) {
1153 per_branch(s, true);
1154 return NO_EXIT;
1156 if (use_goto_tb(s, dest)) {
1157 update_cc_op(s);
1158 per_breaking_event(s);
1159 tcg_gen_goto_tb(0);
1160 tcg_gen_movi_i64(psw_addr, dest);
1161 tcg_gen_exit_tb((uintptr_t)s->tb);
1162 return EXIT_GOTO_TB;
1163 } else {
1164 tcg_gen_movi_i64(psw_addr, dest);
1165 per_branch(s, false);
1166 return EXIT_PC_UPDATED;
1170 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1171 bool is_imm, int imm, TCGv_i64 cdest)
1173 ExitStatus ret;
1174 uint64_t dest = s->pc + 2 * imm;
1175 TCGLabel *lab;
1177 /* Take care of the special cases first. */
1178 if (c->cond == TCG_COND_NEVER) {
1179 ret = NO_EXIT;
1180 goto egress;
1182 if (is_imm) {
1183 if (dest == s->next_pc) {
1184 /* Branch to next. */
1185 per_branch(s, true);
1186 ret = NO_EXIT;
1187 goto egress;
1189 if (c->cond == TCG_COND_ALWAYS) {
1190 ret = help_goto_direct(s, dest);
1191 goto egress;
1193 } else {
1194 if (!cdest) {
1195 /* E.g. bcr %r0 -> no branch. */
1196 ret = NO_EXIT;
1197 goto egress;
1199 if (c->cond == TCG_COND_ALWAYS) {
1200 tcg_gen_mov_i64(psw_addr, cdest);
1201 per_branch(s, false);
1202 ret = EXIT_PC_UPDATED;
1203 goto egress;
1207 if (use_goto_tb(s, s->next_pc)) {
1208 if (is_imm && use_goto_tb(s, dest)) {
1209 /* Both exits can use goto_tb. */
1210 update_cc_op(s);
1212 lab = gen_new_label();
1213 if (c->is_64) {
1214 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1215 } else {
1216 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1219 /* Branch not taken. */
1220 tcg_gen_goto_tb(0);
1221 tcg_gen_movi_i64(psw_addr, s->next_pc);
1222 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1224 /* Branch taken. */
1225 gen_set_label(lab);
1226 per_breaking_event(s);
1227 tcg_gen_goto_tb(1);
1228 tcg_gen_movi_i64(psw_addr, dest);
1229 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1231 ret = EXIT_GOTO_TB;
1232 } else {
1233 /* Fallthru can use goto_tb, but taken branch cannot. */
1234 /* Store taken branch destination before the brcond. This
1235 avoids having to allocate a new local temp to hold it.
1236 We'll overwrite this in the not taken case anyway. */
1237 if (!is_imm) {
1238 tcg_gen_mov_i64(psw_addr, cdest);
1241 lab = gen_new_label();
1242 if (c->is_64) {
1243 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1244 } else {
1245 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1248 /* Branch not taken. */
1249 update_cc_op(s);
1250 tcg_gen_goto_tb(0);
1251 tcg_gen_movi_i64(psw_addr, s->next_pc);
1252 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1254 gen_set_label(lab);
1255 if (is_imm) {
1256 tcg_gen_movi_i64(psw_addr, dest);
1258 per_breaking_event(s);
1259 ret = EXIT_PC_UPDATED;
1261 } else {
1262 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1263 Most commonly we're single-stepping or some other condition that
1264 disables all use of goto_tb. Just update the PC and exit. */
1266 TCGv_i64 next = tcg_const_i64(s->next_pc);
1267 if (is_imm) {
1268 cdest = tcg_const_i64(dest);
1271 if (c->is_64) {
1272 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1273 cdest, next);
1274 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1275 } else {
1276 TCGv_i32 t0 = tcg_temp_new_i32();
1277 TCGv_i64 t1 = tcg_temp_new_i64();
1278 TCGv_i64 z = tcg_const_i64(0);
1279 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1280 tcg_gen_extu_i32_i64(t1, t0);
1281 tcg_temp_free_i32(t0);
1282 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1283 per_branch_cond(s, TCG_COND_NE, t1, z);
1284 tcg_temp_free_i64(t1);
1285 tcg_temp_free_i64(z);
1288 if (is_imm) {
1289 tcg_temp_free_i64(cdest);
1291 tcg_temp_free_i64(next);
1293 ret = EXIT_PC_UPDATED;
1296 egress:
1297 free_compare(c);
1298 return ret;
1301 /* ====================================================================== */
1302 /* The operations. These perform the bulk of the work for any insn,
1303 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE (integer): out = |in2|, computed branchlessly.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    /* out = (in2 < 0) ? -in2 : in2 */
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit float.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit float.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (extended BFP): sign lives in the high doubleword (in1);
   the low doubleword (in2) is copied through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* ADD: out = in1 + in2.  CC computation is handled by the insn table.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD WITH CARRY: out = in1 + in2 + carry, where the carry is extracted
   from the current (possibly dynamic) condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: setcond in 32 bits, then widen the 0/1 result. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* ADD (logical) IMMEDIATE to storage (ASI et al).  With the
   interlocked-access facility (STFLE bit 45) the update is a single
   atomic fetch-add; otherwise it is a plain load/add/store sequence.  */
static ExitStatus op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}
/* ADD (short BFP), via helper for IEEE semantics and exceptions.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands in register pairs; the helper
   returns the high half, the low half comes back via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND IMMEDIATE (NIHH et al): and a shifted immediate field into in1.
   insn->data encodes the field position (low byte) and width (high byte).  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Position the immediate and fill all bits outside the field with
       ones so the AND leaves the rest of in1 untouched.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* AND to storage (NI et al).  With the interlocked-access facility 2
   the update is a single atomic fetch-and; otherwise load/and/store.  */
static ExitStatus op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}
/* BRANCH AND SAVE (register form): store the link information, then
   branch to in2.  A zero R2 (in2 == NULL) means "save only, no branch".  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* BRANCH RELATIVE AND SAVE: save link info, branch to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* BRANCH ON CONDITION (BC/BCR/BRC): branch when the CC matches mask m1.
   BCR with R2 == 0 never branches but may act as a serialization point.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low word of R1 and branch
   while the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, write back only the low 32 bits of R1.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT HIGH (BRCTH): decrement the high word of R1 and
   branch while the result is non-zero.  Always immediate form.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Extract the high word, decrement, write back only bits 0-31.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch while
   the result is non-zero.  R1 is a global, hence g1 = true.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXH/BXLE): R1 += R3, then compare the low
   word of R1 against the comparand in R3|1.  insn->data selects the
   "low or equal" (LE) vs "high" (GT) sense.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Note: when r3 is even, the comparand is r3+1; when odd, r3 itself.  */
    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): R1 += R3, compare R1 against
   the comparand in R3|1.  If R1 aliases the comparand register, copy
   the comparand first so the pre-update value is used.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* Snapshot the comparand before the addition clobbers it.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH (CRJ/CGRJ/CLRJ/... family): compare in1 with in2
   under mask m3 and branch.  insn->data selects unsigned comparison.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        /* Register/storage form: compute the branch target address.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* COMPARE (short BFP): helper sets CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): 128-bit operands in register pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CONVERT TO FIXED (BFP -> integer) family.  The m3 field carries the
   rounding mode; the helper performs the conversion and the CC is then
   derived from the source value (NaN handling included).
   Naming: c{f,g}{e,d,x}b = to {32,64}-bit signed from {short,long,ext} BFP;
   cl... variants convert to unsigned (logical) integers.  */

/* CONVERT TO FIXED (32-bit <- short BFP).  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (32-bit <- long BFP).  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (32-bit <- extended BFP).  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (64-bit <- short BFP).  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (64-bit <- long BFP).  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (64-bit <- extended BFP).  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (32-bit <- short BFP).  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (32-bit <- long BFP).  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (32-bit <- extended BFP).  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (64-bit <- short BFP).  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (64-bit <- long BFP).  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (64-bit <- extended BFP).  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT FROM FIXED / CONVERT FROM LOGICAL (integer -> BFP) family.
   m3 carries the rounding mode; no CC is set by these.  */

/* CONVERT FROM FIXED (short BFP <- 64-bit).  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (long BFP <- 64-bit).  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (extended BFP <- 64-bit); low half via low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (short BFP <- unsigned 64-bit).  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (long BFP <- unsigned 64-bit).  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (extended BFP <- unsigned 64-bit).  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM (CKSM): helper computes the checksum and returns the number
   of bytes consumed; R2/R2+1 (address/length pair) are advanced here.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (CLC): power-of-two lengths up to 8 are open-coded as
   two loads plus an unsigned compare; anything else goes to the helper.
   Note l1 is the length minus one, per the instruction encoding.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: byte-wise comparison in the helper, which sets CC. */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG (CLCL): both operands are even/odd register
   pairs; odd register numbers raise a specification exception.  */
static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED (CLCLE): register pairs R1 and R3 plus
   a pad character from in2; odd register numbers are a specification
   exception.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG UNICODE (CLCLU): like CLCLE but for 2-byte
   (Unicode) characters; odd register numbers are a specification
   exception.  */
static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL CHARACTERS UNDER MASK (CLM): compare the bytes of R1
   selected by mask m3 against successive bytes at the in2 address.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING (CLST): terminator character comes from R0;
   the helper returns the updated first address, the second comes back
   via the low128 mechanism.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* COPY SIGN (CPSDR): out = magnitude of in2 with the sign bit of in1.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* COMPARE AND SWAP (CS/CSY/CSG): a single atomic cmpxchg; CC is 0 when
   the memory value matched the expected value, 1 otherwise.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap on register
   pairs; the parallel helper enforces true atomicity under MTTCG.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE AND SWAP AND STORE (CSST): entirely in the helper; the
   parallel variant is used when other vCPUs may run concurrently.  */
static ExitStatus op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (CSP/CSPG, privileged): cmpxchg at the
   masked operand address; on success with bit 63 of R2 set, purge the
   TLB on all CPUs.  insn->data carries the access size (TCGMemOp).  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask off the low bits of the address per the operand size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal? */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return NO_EXIT;
}
#endif
/* CONVERT TO DECIMAL (CVD): convert the low 32 bits of in1 to packed
   decimal and store the 8-byte result at the in2 address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* COMPARE AND TRAP family: trap unless the inverted condition holds.
   insn->data selects unsigned (logical) comparison.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Branch around the trap when the condition does NOT match.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
/* CONVERT UTF (CU12/CU14/CU21/CU24/CU41/CU42): insn->data encodes the
   source/destination encodings (e.g. 12 = UTF-8 -> UTF-16).  The m3
   well-formedness check is only honored with the ETF3 enhancement.  */
static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even. */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): hypervisor call; the function code comes from
   the i2 field, dispatching happens in the helper.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* DIVIDE family: the helpers return the remainder directly and the
   quotient through the low128 mechanism (or vice versa as wired below);
   division exceptions are raised inside the helpers.  */

/* DIVIDE (signed 64/32).  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (unsigned 64/32).  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE SINGLE (signed 64/64).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (unsigned 128/64): dividend in out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (long BFP).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (extended BFP): 128-bit operands; low half via low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS REGISTER (EAR): copy access register r2 into out.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT CPU ATTRIBUTE (ECAG).  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

/* EXTRACT FPC (EFPC): read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW (EPSW): R1 gets the high word of the PSW mask; a nonzero
   R2 gets the low word.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE (EX/EXRL): run the target instruction, modified by bits from
   R1, via the helper; nesting EXECUTE is a program exception.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed. */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    /* The helper may raise exceptions; make PC and CC current first.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* R1 == 0 means "no modification"; pass a zero constant.  */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}
/* LOAD FP INTEGER (short BFP): round in2 to an integral value; the
   rounding mode is in m3.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* LOAD FP INTEGER (long BFP).  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* LOAD FP INTEGER (extended BFP): low half comes via low128.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE (FLOGR): R1 = leading-zero count (64 when in2 is 0),
   R1+1 = in2 with the leftmost one bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2336 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2338 int m3 = get_field(s->fields, m3);
2339 int pos, len, base = s->insn->data;
2340 TCGv_i64 tmp = tcg_temp_new_i64();
2341 uint64_t ccm;
2343 switch (m3) {
2344 case 0xf:
2345 /* Effectively a 32-bit load. */
2346 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2347 len = 32;
2348 goto one_insert;
2350 case 0xc:
2351 case 0x6:
2352 case 0x3:
2353 /* Effectively a 16-bit load. */
2354 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2355 len = 16;
2356 goto one_insert;
2358 case 0x8:
2359 case 0x4:
2360 case 0x2:
2361 case 0x1:
2362 /* Effectively an 8-bit load. */
2363 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2364 len = 8;
2365 goto one_insert;
2367 one_insert:
2368 pos = base + ctz32(m3) * 8;
2369 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2370 ccm = ((1ull << len) - 1) << pos;
2371 break;
2373 default:
2374 /* This is going to be a sequence of loads and inserts. */
2375 pos = base + 32 - 8;
2376 ccm = 0;
2377 while (m3) {
2378 if (m3 & 0x8) {
2379 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2380 tcg_gen_addi_i64(o->in2, o->in2, 1);
2381 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2382 ccm |= 0xff << pos;
2384 m3 = (m3 << 1) & 0xf;
2385 pos -= 8;
2387 break;
2390 tcg_gen_movi_i64(tmp, ccm);
2391 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2392 tcg_temp_free_i64(tmp);
2393 return NO_EXIT;
/* INSERT IMMEDIATE (IIHH et al): deposit in2 into in1 at the field
   position/width encoded in insn->data (low byte = shift, high = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK (IPM): place the program mask (PSW bits) and the
   current CC into bits 32-39 of R1, leaving the rest untouched.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the 4-bit program mask from the PSW into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge the (now materialized) CC into bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY (IDTE, privileged): m4 (local-clearing
   control) is only passed through when the facility exists.  */
static ExitStatus op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

/* INVALIDATE PAGE TABLE ENTRY (IPTE, privileged).  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (ISKE, privileged).  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* Message-security-assist instructions (KM, KMC, KIMD, PPNO, ...):
   insn->data carries the feature type, which also determines which
   register-pair/nonzero constraints apply before calling the helper.  */
static ExitStatus op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    /* Each case adds the register constraints of the instructions above
       it, hence the cascade of fall-throughs.  */
    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return NO_EXIT;
}
static ExitStatus op_keb(DisasContext *s, DisasOps *o)
{
    /* 32-bit FP compare via helper; the helper produces the CC. */
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
{
    /* 64-bit FP compare via helper; the helper produces the CC. */
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
{
    /* 128-bit FP compare: operands are passed as high/low halves. */
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, fetched by the
       atomic add below. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, fetched by the
       atomic AND below. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, fetched by the
       atomic OR below. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, fetched by the
       atomic XOR below. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    /* FP conversion 32-bit -> 64-bit via helper. */
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    /* FP conversion 64-bit -> 32-bit via helper (may round). */
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    /* FP conversion 128-bit -> 64-bit; input is the in1/in2 pair. */
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    /* FP conversion 128-bit -> 32-bit; input is the in1/in2 pair. */
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    /* FP conversion 64-bit -> 128-bit; low half returned via helper. */
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    /* FP conversion 32-bit -> 128-bit; low half returned via helper. */
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    /* Load 31 bits: keep only bits 0-30 of in2. */
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    /* Load a sign-extended byte from the address in in2. */
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    /* Load a zero-extended byte from the address in in2. */
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    /* Load a sign-extended halfword from the address in in2. */
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    /* Load a zero-extended halfword from the address in in2. */
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    /* Load a sign-extended word from the address in in2. */
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    /* Load a zero-extended word from the address in in2. */
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    /* Load a doubleword from the address in in2. */
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    /* Load-and-trap: trap if the loaded value is zero. */
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    /* 64-bit load-and-trap: trap if the loaded value is zero. */
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    /* Load-high-and-trap: store into the high word of r1, trap on zero. */
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    /* Zero-extending 32-bit load-and-trap: trap if the result is zero. */
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    /* 31-bit load-and-trap: mask to 31 bits, trap if the result is zero. */
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    /* Load-on-condition: out = (condition m3 holds) ? in2 : in1. */
    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        /* Materialize the 32-bit comparison as a 0/1 value ... */
        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        /* ... widen it to 64 bits ... */
        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        /* ... and select on it being nonzero. */
        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2750 #ifndef CONFIG_USER_ONLY
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    /* Privileged: load control registers r1..r3 (32-bit) via helper. */
    check_privileged(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    /* Privileged: load control registers r1..r3 (64-bit) via helper. */
    check_privileged(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    /* Privileged: translate the address in in2 via helper; CC from helper. */
    check_privileged(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    /* Privileged: store in2 into the program-parameter field of env. */
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Privileged: load a short (two-word) PSW from memory and install it.
       load_psw may change DAT/addressing state, so the TB cannot continue. */
    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Privileged: load an extended (two-doubleword) PSW and install it.
       As with op_lpsw, execution cannot continue in this TB. */
    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2828 #endif
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    /* Load access registers r1..r3 from memory via helper. */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Load multiple 32-bit: registers r1..r3 (register numbers wrap at
       15) from consecutive words at the address in in2. */

    /* Only one register to read.  */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read.  */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur.  */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Load multiple high: like op_lm32, but the loaded words go into
       the high halves of the registers (store_reg32h_i64). */

    /* Only one register to read.  */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read.  */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur.  */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Load multiple 64-bit: registers r1..r3 (wrapping at 15) from
       consecutive doublewords at the address in in2. */

    /* Only one register to read.  */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read.  */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur.  */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* Load-pair-disjoint: two loads that must appear interlocked.
       In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
{
    /* 128-bit load: use the parallel (atomic) helper under MTTCG. */
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    }
    return_low128(o->out2);
    return NO_EXIT;
}
3010 #ifndef CONFIG_USER_ONLY
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    /* Privileged: load a word using the real address in in2 via helper. */
    check_privileged(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    /* Privileged: 64-bit variant of op_lura. */
    check_privileged(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
3024 #endif
static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
{
    /* Load with the rightmost byte zeroed. */
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return NO_EXIT;
}
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    /* Move by stealing the in2 temporary as the output (avoids a copy);
       in2 is cleared so the generic code does not free it twice. */
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return NO_EXIT;
}
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Move in2 to the output (stealing the temp as in op_mov2), then
       set access register 1 according to the current address-space
       control from the TB flags. */
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            /* Copy the access register selected by the base field. */
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    /* Move a register pair by stealing both input temporaries. */
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    /* Memory-to-memory move of l1+1 bytes via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
{
    /* Move inverse (byte-reversed) via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even (each names an even-odd register pair). */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even (each names an even-odd register pair). */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even (each names an even-odd register pair). */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
{
    /* Move with optional specifications; r3 holds the length operand. */
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
3175 #ifndef CONFIG_USER_ONLY
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    /* Privileged move-to-primary; the length comes from regs[l1]. */
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    /* Privileged move-to-secondary; the length comes from regs[l1]. */
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3193 #endif
static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
{
    /* Move numerics (low nibbles) via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
{
    /* Move with offset via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    /* Move page via helper; regs[0] carries the option bits. */
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    /* Move string: helper returns the updated first operand in in1
       and the updated second operand via the low-128 mechanism. */
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
{
    /* Move zones (high nibbles) via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    /* 64-bit multiply, low half of the product. */
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    /* Unsigned 64x64 -> 128-bit multiply; out = high, out2 = low. */
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    /* 32-bit FP multiply via helper. */
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    /* FP multiply with 32-bit operands and a 64-bit result via helper. */
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    /* 64-bit FP multiply via helper. */
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    /* 128-bit FP multiply; operands passed as high/low pairs. */
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    /* FP multiply with a 64-bit operand and a 128-bit result. */
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    /* 32-bit FP multiply-and-add: out = in1 * in2 + f(r3). */
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    /* 64-bit FP multiply-and-add: out = in1 * in2 + f(r3). */
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    /* 32-bit FP multiply-and-subtract via helper. */
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    /* 64-bit FP multiply-and-subtract via helper. */
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    /* Negative absolute value: out = -(|in2|), i.e. negate only when
       the input is non-negative. */
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    /* Force the 32-bit FP sign bit on. */
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    /* Force the 64-bit FP sign bit on. */
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    /* Force the sign bit on in the high half; copy the low half. */
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    /* Memory-to-memory AND via helper; CC from helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    /* Two's-complement negation. */
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    /* Flip the 32-bit FP sign bit. */
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    /* Flip the 64-bit FP sign bit. */
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    /* Flip the sign bit in the high half; copy the low half. */
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    /* Memory-to-memory OR via helper; CC from helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    /* Register OR. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    /* OR an immediate field into in1: insn->data packs the shift in
       the low byte and the field size in the upper bits. */
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
static ExitStatus op_oi(DisasContext *s, DisasOps *o)
{
    /* OR into memory.  Without the interlocked-access facility this is
       a plain load/modify/store; with it, an atomic fetch-or. */
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}
static ExitStatus op_pack(DisasContext *s, DisasOps *o)
{
    /* Pack decimal digits via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    /* Population count via helper. */
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3471 #ifndef CONFIG_USER_ONLY
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    /* Privileged: purge the TLB via helper. */
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3478 #endif
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Rotate-then-insert-selected-bits: rotate in2 left by i5, insert
       the bit range i3..i4 into in1 (or zeros, with the zero flag in
       bit 7 of i4).  Where possible this is lowered to a single
       extract or deposit TCG op. */

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask the rotated input, mask the kept bits of
           the output, and merge. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
3570 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3572 int i3 = get_field(s->fields, i3);
3573 int i4 = get_field(s->fields, i4);
3574 int i5 = get_field(s->fields, i5);
3575 uint64_t mask;
3577 /* If this is a test-only form, arrange to discard the result. */
3578 if (i3 & 0x80) {
3579 o->out = tcg_temp_new_i64();
3580 o->g_out = false;
3583 i3 &= 63;
3584 i4 &= 63;
3585 i5 &= 63;
3587 /* MASK is the set of bits to be operated on from R2.
3588 Take care for I3/I4 wraparound. */
3589 mask = ~0ull >> i3;
3590 if (i3 <= i4) {
3591 mask ^= ~0ull >> i4 >> 1;
3592 } else {
3593 mask |= ~(~0ull >> i4 >> 1);
3596 /* Rotate the input as necessary. */
3597 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3599 /* Operate. */
3600 switch (s->fields->op2) {
3601 case 0x55: /* AND */
3602 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3603 tcg_gen_and_i64(o->out, o->out, o->in2);
3604 break;
3605 case 0x56: /* OR */
3606 tcg_gen_andi_i64(o->in2, o->in2, mask);
3607 tcg_gen_or_i64(o->out, o->out, o->in2);
3608 break;
3609 case 0x57: /* XOR */
3610 tcg_gen_andi_i64(o->in2, o->in2, mask);
3611 tcg_gen_xor_i64(o->out, o->out, o->in2);
3612 break;
3613 default:
3614 abort();
3617 /* Set the CC. */
3618 tcg_gen_andi_i64(cc_dst, o->out, mask);
3619 set_cc_nz_u64(s, cc_dst);
3620 return NO_EXIT;
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the low 16 bits. */
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the low 32 bits. */
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    /* Byte-swap all 64 bits. */
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    /* 32-bit rotate left: narrow, rotate, then zero-extend back. */
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    /* 64-bit rotate left. */
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3662 #ifndef CONFIG_USER_ONLY
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    /* Privileged: reset the reference bit via helper; CC from helper. */
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    /* Privileged: set the address-space control via helper. */
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return EXIT_PC_STALE;
}
3678 #endif
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* Set addressing mode: insn->data selects the mode, which in turn
       determines the address mask the current PC must satisfy. */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    /* Record the new mode in PSW mask bits 31-32. */
    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return EXIT_PC_STALE;
}
/* SET ACCESS: store the low 32 bits of in2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
/* SUBTRACT (short BFP): out = in1 - in2 via helper (sets FP flags).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT (long BFP): out = in1 - in2 via helper.  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT (extended BFP): 128-bit result comes back as out plus the
   helper's low half, retrieved via return_low128.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* SQUARE ROOT (short BFP) via helper.  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* SQUARE ROOT (long BFP) via helper.  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* SQUARE ROOT (extended BFP): 128-bit result, low half via return_low128.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3760 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper returns the condition code.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}
/* SIGNAL PROCESSOR: privileged; pass r1/r3 register numbers and the
   order address to the helper, which returns the condition code.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3780 #endif
/* STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data):
   branch around the store when the m3 condition is NOT fulfilled.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high word of r1.  */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic, 32- or 64-bit per insn->data): the CC
   is computed from the pre-shift inputs; the sign bit is preserved.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* Logical shift left: out = in1 << in2.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Arithmetic shift right: out = in1 >> in2 (sign-propagating).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Logical shift right: out = in1 >> in2 (zero-filling).  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: install in2 as the new floating-point control via helper.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET FPC AND SIGNAL: helper installs the FPC and may raise a trap.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT, distinguished by op2): insert
   the rounding-mode bits into the FPC, then reinstall the FPC via the
   sfpc helper so fpu_status picks up the new mode.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        /* No base register: the mode comes directly from the displacement. */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
/* SET PROGRAM MASK: bits 28-29 of in1 become the CC; bits 24-27 are
   deposited into the PSW program-mask field.  */
static ExitStatus op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return NO_EXIT;
}
/* EXTRACT CPU TIME: loads the third operand into r3, stores the CPU
   timer difference with the first operand into GR0, and the second
   operand address into GR1.  All operands are fetched before any
   register is modified so a fault restarts cleanly.  */
static ExitStatus op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3952 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits 4-7 of in2 become the
   PSW key field.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return NO_EXIT;
}
/* SET STORAGE KEY EXTENDED: privileged; delegated to a helper.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SET SYSTEM MASK: privileged; in2 replaces the top byte of the PSW
   mask.  This may toggle DAT/interrupt bits, so exit to the main loop
   without chaining so pending interrupts are reevaluated.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}
/* STORE CPU ADDRESS: privileged; read the core id from env.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return NO_EXIT;
}
/* STORE CLOCK: helper reads the TOD clock; CC forced to 0.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3991 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3993 TCGv_i64 c1 = tcg_temp_new_i64();
3994 TCGv_i64 c2 = tcg_temp_new_i64();
3995 TCGv_i64 todpr = tcg_temp_new_i64();
3996 gen_helper_stck(c1, cpu_env);
3997 /* 16 bit value store in an uint32_t (only valid bits set) */
3998 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
3999 /* Shift the 64-bit value into its place as a zero-extended
4000 104-bit value. Note that "bit positions 64-103 are always
4001 non-zero so that they compare differently to STCK"; we set
4002 the least significant bit to 1. */
4003 tcg_gen_shli_i64(c2, c1, 56);
4004 tcg_gen_shri_i64(c1, c1, 8);
4005 tcg_gen_ori_i64(c2, c2, 0x10000);
4006 tcg_gen_or_i64(c2, c2, todpr);
4007 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4008 tcg_gen_addi_i64(o->in2, o->in2, 8);
4009 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4010 tcg_temp_free_i64(c1);
4011 tcg_temp_free_i64(c2);
4012 tcg_temp_free_i64(todpr);
4013 /* ??? We don't implement clock states. */
4014 gen_op_movi_cc(s, 0);
4015 return NO_EXIT;
/* SET CLOCK COMPARATOR: privileged; delegated to a helper.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET CLOCK PROGRAMMABLE FIELD: privileged; takes its operand from GR0.  */
static ExitStatus op_sckpf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckpf(cpu_env, regs[0]);
    return NO_EXIT;
}
/* STORE CLOCK COMPARATOR: privileged; helper produces the value.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE CONTROL (64-bit, STCTG): privileged; the helper stores control
   registers r1..r3 at the operand address.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CONTROL (32-bit, STCTL): privileged; same shape as STCTG.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CPU ID: privileged; store the 64-bit cpuid, aligned.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    return NO_EXIT;
}
/* SET CPU TIMER: privileged; delegated to a helper.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
/* STORE FACILITY LIST: privileged; helper writes to low core.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}
/* STORE CPU TIMER: privileged; helper produces the value.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE SYSTEM INFORMATION: privileged; function code comes from
   GR0/GR1; the helper returns the condition code.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* SET PREFIX: privileged; delegated to a helper.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* CANCEL SUBCHANNEL (XSCH): privileged I/O; subchannel id in GR1.  */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* CLEAR SUBCHANNEL (CSCH): privileged I/O; subchannel id in GR1.  */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* HALT SUBCHANNEL (HSCH): privileged I/O; subchannel id in GR1.  */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* MODIFY SUBCHANNEL (MSCH): privileged I/O; SCHIB address in in2.  */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* RESET CHANNEL PATH (RCHP): privileged I/O; channel path id in GR1.  */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* RESUME SUBCHANNEL (RSCH): privileged I/O; subchannel id in GR1.  */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* SET ADDRESS LIMIT (SAL): privileged I/O; operand in GR1.  */
static ExitStatus op_sal(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sal(cpu_env, regs[1]);
    return NO_EXIT;
}
/* SET CHANNEL MONITOR (SCHM): privileged I/O; operands in GR1/GR2.  */
static ExitStatus op_schm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return NO_EXIT;
}
/* SIGNAL ADAPTER (SIGA): privileged; not implemented.  */
static ExitStatus op_siga(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
/* STORE CHANNEL PATH STATUS (STCPS): privileged; treated as a no-op.  */
static ExitStatus op_stcps(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* The instruction is suppressed if not provided.  */
    return NO_EXIT;
}
/* START SUBCHANNEL (SSCH): privileged I/O; ORB address in in2.  */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE SUBCHANNEL (STSCH): privileged I/O; SCHIB address in in2.  */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE CHANNEL REPORT WORD (STCRW): privileged I/O.  */
static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST PENDING INTERRUPTION (TPI): privileged I/O; helper returns CC.  */
static ExitStatus op_tpi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST SUBCHANNEL (TSCH): privileged I/O; IRB address in in2.  */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CHANNEL SUBSYSTEM CALL (CHSC): privileged; command block in in2.  */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX: privileged; read psa from env and mask to the
   architected prefix bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM): privileged.
   The old system-mask byte is stored first; the PSW mask is then ANDed
   or ORed with the immediate.  Since DAT/interrupt bits may change,
   exit to the main loop without chaining.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}
/* STORE USING REAL ADDRESS (32-bit): privileged; via helper.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (64-bit): privileged; via helper.  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
4277 #endif
/* STORE FACILITY LIST EXTENDED: helper returns the condition code.  */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Store the low byte of in1 at address in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Store the low halfword of in1 at address in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Store the low word of in1 at address in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Store all of in1 at address in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: helper stores access registers r1..r3.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by m3.
   Contiguous masks become a single sized store; other masks fall back
   to a byte-by-byte shift-and-store sequence.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within in1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STORE MULTIPLE (STM/STMG, element size in insn->data): store the
   registers r1 through r3, wrapping past register 15.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        /* Register numbers wrap modulo 16.  */
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
4393 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4395 int r1 = get_field(s->fields, r1);
4396 int r3 = get_field(s->fields, r3);
4397 TCGv_i64 t = tcg_temp_new_i64();
4398 TCGv_i64 t4 = tcg_const_i64(4);
4399 TCGv_i64 t32 = tcg_const_i64(32);
4401 while (1) {
4402 tcg_gen_shl_i64(t, regs[r1], t32);
4403 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4404 if (r1 == r3) {
4405 break;
4407 tcg_gen_add_i64(o->in2, o->in2, t4);
4408 r1 = (r1 + 1) & 15;
4411 tcg_temp_free_i64(t);
4412 tcg_temp_free_i64(t4);
4413 tcg_temp_free_i64(t32);
4414 return NO_EXIT;
/* STORE PAIR TO QUADWORD: pick the parallel (atomic) helper when
   translating for a parallel context.  */
static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    }
    return NO_EXIT;
}
/* SEARCH STRING: helper does the scan and sets the condition code.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* SEARCH STRING UNICODE: same shape as SRST, two-byte units.  */
static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Plain subtraction: out = in1 - in2.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   derived from the previous condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the SVC code and instruction length in env,
   then raise the SVC exception.  The TB does not continue.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
/* TEST ADDRESSING MODE: encode the 64/31-bit mode flags of the TB
   into the condition code.  */
static ExitStatus op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return NO_EXIT;
}
/* TEST DATA CLASS (short BFP): helper returns the condition code.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST DATA CLASS (long BFP): helper returns the condition code.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST DATA CLASS (extended BFP): helper returns the condition code.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4535 #ifndef CONFIG_USER_ONLY
/* TEST BLOCK: privileged; helper returns the condition code.  */
static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST PROTECTION: helper returns the condition code.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4552 #endif
/* TEST DECIMAL: helper validates the packed-decimal operand of length
   l1+1 and returns the condition code.  */
static ExitStatus op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return NO_EXIT;
}
/* TRANSLATE: helper maps l1+1 bytes through the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* TRANSLATE EXTENDED: 128-bit result via return_low128; CC from helper.  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}
/* TRANSLATE AND TEST: helper returns the condition code.  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* TRANSLATE AND TEST REVERSE: like TRT but scanning backwards.  */
static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT, low opcode bits
   select the unit sizes): build the test-character value from GR0
   unless the ETF2-enhancement m3 bit says "no test character".  */
static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    /* Without the ETF2 enhancement, m3 is ignored.  */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 signals "no test character" to the helper.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST AND SET: atomically exchange the byte at in2 with 0xff; the
   old byte's leftmost bit becomes the condition code.  */
static ExitStatus op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return NO_EXIT;
}
/* UNPACK: helper converts packed decimal to zoned.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* UNPACK ASCII: length over 32 bytes is a specification exception;
   otherwise the helper performs the conversion and returns the CC.  */
static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* UNPACK UNICODE: the length must be even and at most 64 bytes,
   else specification exception; helper returns the CC.  */
static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR (character): XC with identical source and destination
   zeroes the field, so emit inline stores of zero for short lengths;
   everything else goes through the helper, which also computes CC.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        /* Emit the widest stores first, narrowing as l shrinks.  */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* Bitwise XOR: out = in1 ^ in2.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* XOR IMMEDIATE into a sub-field of in1: insn->data packs the field
   size (high byte) and shift (low byte); CC reflects only the bits
   that were manipulated.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* XOR IMMEDIATE on memory (XI): use an atomic fetch-xor when the
   interlocked-access facility is present, otherwise load/xor/store.
   The result is recomputed in o->out for CC generation either way.  */
static ExitStatus op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}
/* Produce a constant zero output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}
/* Produce a zero output pair; out2 aliases out and is marked global
   so the common code does not free it twice.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
4792 #ifndef CONFIG_USER_ONLY
/* CLP (zPCI list/query): privileged; helper does the work and sets CC.  */
static ExitStatus op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* PCILG (zPCI load): privileged; helper loads and sets CC.  */
static ExitStatus op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* PCISTG (zPCI store): privileged; helper stores and sets CC.  */
static ExitStatus op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STPCIFC (store PCI function controls): privileged; the access
   register number from b2 is passed along for address translation.  */
static ExitStatus op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
/* SIC (set interruption controls): privileged; via helper.  */
static ExitStatus op_sic(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* RPCIT (refresh PCI translations): privileged; helper sets CC.  */
static ExitStatus op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* PCISTB (zPCI store block): privileged; r1/r3 plus the operand
   address and its access register go to the helper, which sets CC.  */
static ExitStatus op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
/* MPCIFC (modify PCI function controls): privileged; helper sets CC.  */
static ExitStatus op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
4890 #endif
4892 /* ====================================================================== */
4893 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4894 the original inputs), update the various cc data structures in order to
4895 be able to compute the new condition code. */
/* CC from a 32-bit absolute-value result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
/* CC from a 64-bit absolute-value result.  */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}
/* CC from a signed 32-bit addition (inputs + output).  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
/* CC from a signed 64-bit addition (inputs + output).  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
/* CC from an unsigned 32-bit addition.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}
/* CC from an unsigned 64-bit addition.  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}
/* CC from an unsigned 32-bit add-with-carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}
4932 static void cout_addc64(DisasContext *s, DisasOps *o)
4934 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4937 static void cout_cmps32(DisasContext *s, DisasOps *o)
4939 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4942 static void cout_cmps64(DisasContext *s, DisasOps *o)
4944 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4947 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4949 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4952 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4954 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4957 static void cout_f32(DisasContext *s, DisasOps *o)
4959 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4962 static void cout_f64(DisasContext *s, DisasOps *o)
4964 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4967 static void cout_f128(DisasContext *s, DisasOps *o)
4969 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4972 static void cout_nabs32(DisasContext *s, DisasOps *o)
4974 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4977 static void cout_nabs64(DisasContext *s, DisasOps *o)
4979 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4982 static void cout_neg32(DisasContext *s, DisasOps *o)
4984 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4987 static void cout_neg64(DisasContext *s, DisasOps *o)
4989 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4992 static void cout_nz32(DisasContext *s, DisasOps *o)
4994 tcg_gen_ext32u_i64(cc_dst, o->out);
4995 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4998 static void cout_nz64(DisasContext *s, DisasOps *o)
5000 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5003 static void cout_s32(DisasContext *s, DisasOps *o)
5005 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5008 static void cout_s64(DisasContext *s, DisasOps *o)
5010 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5013 static void cout_subs32(DisasContext *s, DisasOps *o)
5015 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5018 static void cout_subs64(DisasContext *s, DisasOps *o)
5020 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5023 static void cout_subu32(DisasContext *s, DisasOps *o)
5025 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5028 static void cout_subu64(DisasContext *s, DisasOps *o)
5030 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5033 static void cout_subb32(DisasContext *s, DisasOps *o)
5035 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5038 static void cout_subb64(DisasContext *s, DisasOps *o)
5040 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5043 static void cout_tm32(DisasContext *s, DisasOps *o)
5045 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5048 static void cout_tm64(DisasContext *s, DisasOps *o)
5050 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5053 /* ====================================================================== */
5054 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5055 with the TCG register to which we will write. Used in combination with
5056 the "wout" generators, in some cases we need a new temporary, and in
5057 some cases we can write to a TCG global. */
5059 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5061 o->out = tcg_temp_new_i64();
5063 #define SPEC_prep_new 0
5065 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5067 o->out = tcg_temp_new_i64();
5068 o->out2 = tcg_temp_new_i64();
5070 #define SPEC_prep_new_P 0
5072 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5074 o->out = regs[get_field(f, r1)];
5075 o->g_out = true;
5077 #define SPEC_prep_r1 0
5079 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5081 int r1 = get_field(f, r1);
5082 o->out = regs[r1];
5083 o->out2 = regs[r1 + 1];
5084 o->g_out = o->g_out2 = true;
5086 #define SPEC_prep_r1_P SPEC_r1_even
5088 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5090 o->out = fregs[get_field(f, r1)];
5091 o->g_out = true;
5093 #define SPEC_prep_f1 0
5095 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5097 int r1 = get_field(f, r1);
5098 o->out = fregs[r1];
5099 o->out2 = fregs[r1 + 2];
5100 o->g_out = o->g_out2 = true;
5102 #define SPEC_prep_x1 SPEC_r1_f128
5104 /* ====================================================================== */
5105 /* The "Write OUTput" generators. These generally perform some non-trivial
5106 copy of data to TCG globals, or to main memory. The trivial cases are
5107 generally handled by having a "prep" generator install the TCG global
5108 as the destination of the operation. */
5110 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5112 store_reg(get_field(f, r1), o->out);
5114 #define SPEC_wout_r1 0
5116 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5118 int r1 = get_field(f, r1);
5119 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5121 #define SPEC_wout_r1_8 0
5123 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5125 int r1 = get_field(f, r1);
5126 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5128 #define SPEC_wout_r1_16 0
5130 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5132 store_reg32_i64(get_field(f, r1), o->out);
5134 #define SPEC_wout_r1_32 0
5136 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5138 store_reg32h_i64(get_field(f, r1), o->out);
5140 #define SPEC_wout_r1_32h 0
5142 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5144 int r1 = get_field(f, r1);
5145 store_reg32_i64(r1, o->out);
5146 store_reg32_i64(r1 + 1, o->out2);
5148 #define SPEC_wout_r1_P32 SPEC_r1_even
5150 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5152 int r1 = get_field(f, r1);
5153 store_reg32_i64(r1 + 1, o->out);
5154 tcg_gen_shri_i64(o->out, o->out, 32);
5155 store_reg32_i64(r1, o->out);
5157 #define SPEC_wout_r1_D32 SPEC_r1_even
5159 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5161 int r3 = get_field(f, r3);
5162 store_reg32_i64(r3, o->out);
5163 store_reg32_i64(r3 + 1, o->out2);
5165 #define SPEC_wout_r3_P32 SPEC_r3_even
5167 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5169 int r3 = get_field(f, r3);
5170 store_reg(r3, o->out);
5171 store_reg(r3 + 1, o->out2);
5173 #define SPEC_wout_r3_P64 SPEC_r3_even
5175 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5177 store_freg32_i64(get_field(f, r1), o->out);
5179 #define SPEC_wout_e1 0
5181 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5183 store_freg(get_field(f, r1), o->out);
5185 #define SPEC_wout_f1 0
5187 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5189 int f1 = get_field(s->fields, r1);
5190 store_freg(f1, o->out);
5191 store_freg(f1 + 2, o->out2);
5193 #define SPEC_wout_x1 SPEC_r1_f128
5195 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5197 if (get_field(f, r1) != get_field(f, r2)) {
5198 store_reg32_i64(get_field(f, r1), o->out);
5201 #define SPEC_wout_cond_r1r2_32 0
5203 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5205 if (get_field(f, r1) != get_field(f, r2)) {
5206 store_freg32_i64(get_field(f, r1), o->out);
5209 #define SPEC_wout_cond_e1e2 0
5211 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5213 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5215 #define SPEC_wout_m1_8 0
5217 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5219 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5221 #define SPEC_wout_m1_16 0
5223 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5225 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5227 #define SPEC_wout_m1_32 0
5229 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5231 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5233 #define SPEC_wout_m1_64 0
5235 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5237 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5239 #define SPEC_wout_m2_32 0
5241 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5243 store_reg(get_field(f, r1), o->in2);
5245 #define SPEC_wout_in2_r1 0
5247 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5249 store_reg32_i64(get_field(f, r1), o->in2);
5251 #define SPEC_wout_in2_r1_32 0
5253 /* ====================================================================== */
5254 /* The "INput 1" generators. These load the first operand to an insn. */
5256 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5258 o->in1 = load_reg(get_field(f, r1));
5260 #define SPEC_in1_r1 0
5262 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5264 o->in1 = regs[get_field(f, r1)];
5265 o->g_in1 = true;
5267 #define SPEC_in1_r1_o 0
5269 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5271 o->in1 = tcg_temp_new_i64();
5272 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5274 #define SPEC_in1_r1_32s 0
5276 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5278 o->in1 = tcg_temp_new_i64();
5279 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5281 #define SPEC_in1_r1_32u 0
5283 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5285 o->in1 = tcg_temp_new_i64();
5286 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5288 #define SPEC_in1_r1_sr32 0
5290 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5292 o->in1 = load_reg(get_field(f, r1) + 1);
5294 #define SPEC_in1_r1p1 SPEC_r1_even
5296 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5298 o->in1 = tcg_temp_new_i64();
5299 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5301 #define SPEC_in1_r1p1_32s SPEC_r1_even
5303 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5305 o->in1 = tcg_temp_new_i64();
5306 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5308 #define SPEC_in1_r1p1_32u SPEC_r1_even
5310 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5312 int r1 = get_field(f, r1);
5313 o->in1 = tcg_temp_new_i64();
5314 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5316 #define SPEC_in1_r1_D32 SPEC_r1_even
5318 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5320 o->in1 = load_reg(get_field(f, r2));
5322 #define SPEC_in1_r2 0
5324 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5326 o->in1 = tcg_temp_new_i64();
5327 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5329 #define SPEC_in1_r2_sr32 0
5331 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5333 o->in1 = load_reg(get_field(f, r3));
5335 #define SPEC_in1_r3 0
5337 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5339 o->in1 = regs[get_field(f, r3)];
5340 o->g_in1 = true;
5342 #define SPEC_in1_r3_o 0
5344 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5346 o->in1 = tcg_temp_new_i64();
5347 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5349 #define SPEC_in1_r3_32s 0
5351 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5353 o->in1 = tcg_temp_new_i64();
5354 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5356 #define SPEC_in1_r3_32u 0
5358 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5360 int r3 = get_field(f, r3);
5361 o->in1 = tcg_temp_new_i64();
5362 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5364 #define SPEC_in1_r3_D32 SPEC_r3_even
5366 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5368 o->in1 = load_freg32_i64(get_field(f, r1));
5370 #define SPEC_in1_e1 0
5372 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5374 o->in1 = fregs[get_field(f, r1)];
5375 o->g_in1 = true;
5377 #define SPEC_in1_f1_o 0
5379 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5381 int r1 = get_field(f, r1);
5382 o->out = fregs[r1];
5383 o->out2 = fregs[r1 + 2];
5384 o->g_out = o->g_out2 = true;
5386 #define SPEC_in1_x1_o SPEC_r1_f128
5388 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5390 o->in1 = fregs[get_field(f, r3)];
5391 o->g_in1 = true;
5393 #define SPEC_in1_f3_o 0
5395 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5397 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5399 #define SPEC_in1_la1 0
5401 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5403 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5404 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5406 #define SPEC_in1_la2 0
5408 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5410 in1_la1(s, f, o);
5411 o->in1 = tcg_temp_new_i64();
5412 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5414 #define SPEC_in1_m1_8u 0
5416 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5418 in1_la1(s, f, o);
5419 o->in1 = tcg_temp_new_i64();
5420 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5422 #define SPEC_in1_m1_16s 0
5424 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5426 in1_la1(s, f, o);
5427 o->in1 = tcg_temp_new_i64();
5428 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5430 #define SPEC_in1_m1_16u 0
5432 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5434 in1_la1(s, f, o);
5435 o->in1 = tcg_temp_new_i64();
5436 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5438 #define SPEC_in1_m1_32s 0
5440 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5442 in1_la1(s, f, o);
5443 o->in1 = tcg_temp_new_i64();
5444 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5446 #define SPEC_in1_m1_32u 0
5448 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5450 in1_la1(s, f, o);
5451 o->in1 = tcg_temp_new_i64();
5452 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5454 #define SPEC_in1_m1_64 0
5456 /* ====================================================================== */
5457 /* The "INput 2" generators. These load the second operand to an insn. */
5459 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5461 o->in2 = regs[get_field(f, r1)];
5462 o->g_in2 = true;
5464 #define SPEC_in2_r1_o 0
5466 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5468 o->in2 = tcg_temp_new_i64();
5469 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5471 #define SPEC_in2_r1_16u 0
5473 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5475 o->in2 = tcg_temp_new_i64();
5476 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5478 #define SPEC_in2_r1_32u 0
5480 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5482 int r1 = get_field(f, r1);
5483 o->in2 = tcg_temp_new_i64();
5484 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5486 #define SPEC_in2_r1_D32 SPEC_r1_even
5488 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5490 o->in2 = load_reg(get_field(f, r2));
5492 #define SPEC_in2_r2 0
5494 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5496 o->in2 = regs[get_field(f, r2)];
5497 o->g_in2 = true;
5499 #define SPEC_in2_r2_o 0
5501 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5503 int r2 = get_field(f, r2);
5504 if (r2 != 0) {
5505 o->in2 = load_reg(r2);
5508 #define SPEC_in2_r2_nz 0
5510 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5512 o->in2 = tcg_temp_new_i64();
5513 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5515 #define SPEC_in2_r2_8s 0
5517 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5519 o->in2 = tcg_temp_new_i64();
5520 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5522 #define SPEC_in2_r2_8u 0
5524 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5526 o->in2 = tcg_temp_new_i64();
5527 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5529 #define SPEC_in2_r2_16s 0
5531 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5533 o->in2 = tcg_temp_new_i64();
5534 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5536 #define SPEC_in2_r2_16u 0
5538 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5540 o->in2 = load_reg(get_field(f, r3));
5542 #define SPEC_in2_r3 0
5544 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5546 o->in2 = tcg_temp_new_i64();
5547 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5549 #define SPEC_in2_r3_sr32 0
5551 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5553 o->in2 = tcg_temp_new_i64();
5554 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5556 #define SPEC_in2_r2_32s 0
5558 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5560 o->in2 = tcg_temp_new_i64();
5561 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5563 #define SPEC_in2_r2_32u 0
5565 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5567 o->in2 = tcg_temp_new_i64();
5568 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5570 #define SPEC_in2_r2_sr32 0
5572 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5574 o->in2 = load_freg32_i64(get_field(f, r2));
5576 #define SPEC_in2_e2 0
5578 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5580 o->in2 = fregs[get_field(f, r2)];
5581 o->g_in2 = true;
5583 #define SPEC_in2_f2_o 0
5585 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5587 int r2 = get_field(f, r2);
5588 o->in1 = fregs[r2];
5589 o->in2 = fregs[r2 + 2];
5590 o->g_in1 = o->g_in2 = true;
5592 #define SPEC_in2_x2_o SPEC_r2_f128
5594 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5596 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5598 #define SPEC_in2_ra2 0
5600 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5602 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5603 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5605 #define SPEC_in2_a2 0
5607 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5609 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5611 #define SPEC_in2_ri2 0
5613 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5615 help_l2_shift(s, f, o, 31);
5617 #define SPEC_in2_sh32 0
5619 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5621 help_l2_shift(s, f, o, 63);
5623 #define SPEC_in2_sh64 0
5625 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5627 in2_a2(s, f, o);
5628 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5630 #define SPEC_in2_m2_8u 0
5632 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5634 in2_a2(s, f, o);
5635 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5637 #define SPEC_in2_m2_16s 0
5639 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5641 in2_a2(s, f, o);
5642 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5644 #define SPEC_in2_m2_16u 0
5646 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5648 in2_a2(s, f, o);
5649 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5651 #define SPEC_in2_m2_32s 0
5653 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5655 in2_a2(s, f, o);
5656 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5658 #define SPEC_in2_m2_32u 0
5660 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5662 in2_a2(s, f, o);
5663 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5665 #define SPEC_in2_m2_64 0
5667 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5669 in2_ri2(s, f, o);
5670 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5672 #define SPEC_in2_mri2_16u 0
5674 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5676 in2_ri2(s, f, o);
5677 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5679 #define SPEC_in2_mri2_32s 0
5681 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5683 in2_ri2(s, f, o);
5684 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5686 #define SPEC_in2_mri2_32u 0
5688 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5690 in2_ri2(s, f, o);
5691 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5693 #define SPEC_in2_mri2_64 0
5695 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5697 o->in2 = tcg_const_i64(get_field(f, i2));
5699 #define SPEC_in2_i2 0
5701 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5703 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5705 #define SPEC_in2_i2_8u 0
5707 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5709 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5711 #define SPEC_in2_i2_16u 0
5713 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5715 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5717 #define SPEC_in2_i2_32u 0
5719 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5721 uint64_t i2 = (uint16_t)get_field(f, i2);
5722 o->in2 = tcg_const_i64(i2 << s->insn->data);
5724 #define SPEC_in2_i2_16u_shl 0
5726 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5728 uint64_t i2 = (uint32_t)get_field(f, i2);
5729 o->in2 = tcg_const_i64(i2 << s->insn->data);
5731 #define SPEC_in2_i2_32u_shl 0
5733 #ifndef CONFIG_USER_ONLY
5734 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5736 o->in2 = tcg_const_i64(s->fields->raw_insn);
5738 #define SPEC_in2_insn 0
5739 #endif
5741 /* ====================================================================== */
5743 /* Find opc within the table of insns. This is formulated as a switch
5744 statement so that (1) we get compile-time notice of cut-paste errors
5745 for duplicated opcodes, and (2) the compiler generates the binary
5746 search tree, rather than us having to post-process the table. */
5748 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5749 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5751 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5753 enum DisasInsnEnum {
5754 #include "insn-data.def"
5757 #undef D
5758 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5759 .opc = OPC, \
5760 .fmt = FMT_##FT, \
5761 .fac = FAC_##FC, \
5762 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5763 .name = #NM, \
5764 .help_in1 = in1_##I1, \
5765 .help_in2 = in2_##I2, \
5766 .help_prep = prep_##P, \
5767 .help_wout = wout_##W, \
5768 .help_cout = cout_##CC, \
5769 .help_op = op_##OP, \
5770 .data = D \
5773 /* Allow 0 to be used for NULL in the table below. */
5774 #define in1_0 NULL
5775 #define in2_0 NULL
5776 #define prep_0 NULL
5777 #define wout_0 NULL
5778 #define cout_0 NULL
5779 #define op_0 NULL
5781 #define SPEC_in1_0 0
5782 #define SPEC_in2_0 0
5783 #define SPEC_prep_0 0
5784 #define SPEC_wout_0 0
/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
5826 static const DisasInsn insn_info[] = {
5827 #include "insn-data.def"
5830 #undef D
5831 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5832 case OPC: return &insn_info[insn_ ## NM];
5834 static const DisasInsn *lookup_opc(uint16_t opc)
5836 switch (opc) {
5837 #include "insn-data.def"
5838 default:
5839 return NULL;
5843 #undef D
5844 #undef C
5846 /* Extract a field from the insn. The INSN should be left-aligned in
5847 the uint64_t so that we can more easily utilize the big-bit-endian
5848 definitions we extract from the Principals of Operation. */
5850 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5852 uint32_t r, m;
5854 if (f->size == 0) {
5855 return;
5858 /* Zero extract the field from the insn. */
5859 r = (insn << f->beg) >> (64 - f->size);
5861 /* Sign-extend, or un-swap the field as necessary. */
5862 switch (f->type) {
5863 case 0: /* unsigned */
5864 break;
5865 case 1: /* signed */
5866 assert(f->size <= 32);
5867 m = 1u << (f->size - 1);
5868 r = (r ^ m) - m;
5869 break;
5870 case 2: /* dl+dh split, signed 20 bit. */
5871 r = ((int8_t)r << 12) | (r >> 8);
5872 break;
5873 default:
5874 abort();
5877 /* Validate that the "compressed" encoding we selected above is valid.
5878 I.e. we havn't make two different original fields overlap. */
5879 assert(((o->presentC >> f->indexC) & 1) == 0);
5880 o->presentC |= 1 << f->indexC;
5881 o->presentO |= 1 << f->indexO;
5883 o->c[f->indexC] = r;
5886 /* Lookup the insn at the current PC, extracting the operands into O and
5887 returning the info struct for the insn. Returns NULL for invalid insn. */
5889 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5890 DisasFields *f)
5892 uint64_t insn, pc = s->pc;
5893 int op, op2, ilen;
5894 const DisasInsn *info;
5896 if (unlikely(s->ex_value)) {
5897 /* Drop the EX data now, so that it's clear on exception paths. */
5898 TCGv_i64 zero = tcg_const_i64(0);
5899 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
5900 tcg_temp_free_i64(zero);
5902 /* Extract the values saved by EXECUTE. */
5903 insn = s->ex_value & 0xffffffffffff0000ull;
5904 ilen = s->ex_value & 0xf;
5905 op = insn >> 56;
5906 } else {
5907 insn = ld_code2(env, pc);
5908 op = (insn >> 8) & 0xff;
5909 ilen = get_ilen(op);
5910 switch (ilen) {
5911 case 2:
5912 insn = insn << 48;
5913 break;
5914 case 4:
5915 insn = ld_code4(env, pc) << 32;
5916 break;
5917 case 6:
5918 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5919 break;
5920 default:
5921 g_assert_not_reached();
5924 s->next_pc = s->pc + ilen;
5925 s->ilen = ilen;
5927 /* We can't actually determine the insn format until we've looked up
5928 the full insn opcode. Which we can't do without locating the
5929 secondary opcode. Assume by default that OP2 is at bit 40; for
5930 those smaller insns that don't actually have a secondary opcode
5931 this will correctly result in OP2 = 0. */
5932 switch (op) {
5933 case 0x01: /* E */
5934 case 0x80: /* S */
5935 case 0x82: /* S */
5936 case 0x93: /* S */
5937 case 0xb2: /* S, RRF, RRE, IE */
5938 case 0xb3: /* RRE, RRD, RRF */
5939 case 0xb9: /* RRE, RRF */
5940 case 0xe5: /* SSE, SIL */
5941 op2 = (insn << 8) >> 56;
5942 break;
5943 case 0xa5: /* RI */
5944 case 0xa7: /* RI */
5945 case 0xc0: /* RIL */
5946 case 0xc2: /* RIL */
5947 case 0xc4: /* RIL */
5948 case 0xc6: /* RIL */
5949 case 0xc8: /* SSF */
5950 case 0xcc: /* RIL */
5951 op2 = (insn << 12) >> 60;
5952 break;
5953 case 0xc5: /* MII */
5954 case 0xc7: /* SMI */
5955 case 0xd0 ... 0xdf: /* SS */
5956 case 0xe1: /* SS */
5957 case 0xe2: /* SS */
5958 case 0xe8: /* SS */
5959 case 0xe9: /* SS */
5960 case 0xea: /* SS */
5961 case 0xee ... 0xf3: /* SS */
5962 case 0xf8 ... 0xfd: /* SS */
5963 op2 = 0;
5964 break;
5965 default:
5966 op2 = (insn << 40) >> 56;
5967 break;
5970 memset(f, 0, sizeof(*f));
5971 f->raw_insn = insn;
5972 f->op = op;
5973 f->op2 = op2;
5975 /* Lookup the instruction. */
5976 info = lookup_opc(op << 8 | op2);
5978 /* If we found it, extract the operands. */
5979 if (info != NULL) {
5980 DisasFormat fmt = info->fmt;
5981 int i;
5983 for (i = 0; i < NUM_C_FIELD; ++i) {
5984 extract_field(f, &format_info[fmt].op[i], insn);
5987 return info;
5990 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5992 const DisasInsn *insn;
5993 ExitStatus ret = NO_EXIT;
5994 DisasFields f;
5995 DisasOps o;
5997 /* Search for the insn in the table. */
5998 insn = extract_insn(env, s, &f);
6000 /* Not found means unimplemented/illegal opcode. */
6001 if (insn == NULL) {
6002 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6003 f.op, f.op2);
6004 gen_illegal_opcode(s);
6005 return EXIT_NORETURN;
6008 #ifndef CONFIG_USER_ONLY
6009 if (s->tb->flags & FLAG_MASK_PER) {
6010 TCGv_i64 addr = tcg_const_i64(s->pc);
6011 gen_helper_per_ifetch(cpu_env, addr);
6012 tcg_temp_free_i64(addr);
6014 #endif
6016 /* Check for insn specification exceptions. */
6017 if (insn->spec) {
6018 int spec = insn->spec, excp = 0, r;
6020 if (spec & SPEC_r1_even) {
6021 r = get_field(&f, r1);
6022 if (r & 1) {
6023 excp = PGM_SPECIFICATION;
6026 if (spec & SPEC_r2_even) {
6027 r = get_field(&f, r2);
6028 if (r & 1) {
6029 excp = PGM_SPECIFICATION;
6032 if (spec & SPEC_r3_even) {
6033 r = get_field(&f, r3);
6034 if (r & 1) {
6035 excp = PGM_SPECIFICATION;
6038 if (spec & SPEC_r1_f128) {
6039 r = get_field(&f, r1);
6040 if (r > 13) {
6041 excp = PGM_SPECIFICATION;
6044 if (spec & SPEC_r2_f128) {
6045 r = get_field(&f, r2);
6046 if (r > 13) {
6047 excp = PGM_SPECIFICATION;
6050 if (excp) {
6051 gen_program_exception(s, excp);
6052 return EXIT_NORETURN;
6056 /* Set up the strutures we use to communicate with the helpers. */
6057 s->insn = insn;
6058 s->fields = &f;
6059 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
6060 o.out = NULL;
6061 o.out2 = NULL;
6062 o.in1 = NULL;
6063 o.in2 = NULL;
6064 o.addr1 = NULL;
6066 /* Implement the instruction. */
6067 if (insn->help_in1) {
6068 insn->help_in1(s, &f, &o);
6070 if (insn->help_in2) {
6071 insn->help_in2(s, &f, &o);
6073 if (insn->help_prep) {
6074 insn->help_prep(s, &f, &o);
6076 if (insn->help_op) {
6077 ret = insn->help_op(s, &o);
6079 if (insn->help_wout) {
6080 insn->help_wout(s, &f, &o);
6082 if (insn->help_cout) {
6083 insn->help_cout(s, &o);
6086 /* Free any temporaries created by the helpers. */
6087 if (o.out && !o.g_out) {
6088 tcg_temp_free_i64(o.out);
6090 if (o.out2 && !o.g_out2) {
6091 tcg_temp_free_i64(o.out2);
6093 if (o.in1 && !o.g_in1) {
6094 tcg_temp_free_i64(o.in1);
6096 if (o.in2 && !o.g_in2) {
6097 tcg_temp_free_i64(o.in2);
6099 if (o.addr1) {
6100 tcg_temp_free_i64(o.addr1);
6103 #ifndef CONFIG_USER_ONLY
6104 if (s->tb->flags & FLAG_MASK_PER) {
6105 /* An exception might be triggered, save PSW if not already done. */
6106 if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
6107 tcg_gen_movi_i64(psw_addr, s->next_pc);
6110 /* Call the helper to check for a possible PER exception. */
6111 gen_helper_per_check_exception(cpu_env);
6113 #endif
6115 /* Advance to the next instruction. */
6116 s->pc = s->next_pc;
6117 return ret;
6120 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
6122 CPUS390XState *env = cs->env_ptr;
6123 DisasContext dc;
6124 target_ulong pc_start;
6125 uint64_t next_page_start;
6126 int num_insns, max_insns;
6127 ExitStatus status;
6128 bool do_debug;
6130 pc_start = tb->pc;
6132 /* 31-bit mode */
6133 if (!(tb->flags & FLAG_MASK_64)) {
6134 pc_start &= 0x7fffffff;
6137 dc.tb = tb;
6138 dc.pc = pc_start;
6139 dc.cc_op = CC_OP_DYNAMIC;
6140 dc.ex_value = tb->cs_base;
6141 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
6143 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
6145 num_insns = 0;
6146 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
6147 if (max_insns == 0) {
6148 max_insns = CF_COUNT_MASK;
6150 if (max_insns > TCG_MAX_INSNS) {
6151 max_insns = TCG_MAX_INSNS;
6154 gen_tb_start(tb);
6156 do {
6157 tcg_gen_insn_start(dc.pc, dc.cc_op);
6158 num_insns++;
6160 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
6161 status = EXIT_PC_STALE;
6162 do_debug = true;
6163 /* The address covered by the breakpoint must be included in
6164 [tb->pc, tb->pc + tb->size) in order to for it to be
6165 properly cleared -- thus we increment the PC here so that
6166 the logic setting tb->size below does the right thing. */
6167 dc.pc += 2;
6168 break;
6171 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
6172 gen_io_start();
6175 status = translate_one(env, &dc);
6177 /* If we reach a page boundary, are single stepping,
6178 or exhaust instruction count, stop generation. */
6179 if (status == NO_EXIT
6180 && (dc.pc >= next_page_start
6181 || tcg_op_buf_full()
6182 || num_insns >= max_insns
6183 || singlestep
6184 || cs->singlestep_enabled
6185 || dc.ex_value)) {
6186 status = EXIT_PC_STALE;
6188 } while (status == NO_EXIT);
6190 if (tb_cflags(tb) & CF_LAST_IO) {
6191 gen_io_end();
6194 switch (status) {
6195 case EXIT_GOTO_TB:
6196 case EXIT_NORETURN:
6197 break;
6198 case EXIT_PC_STALE:
6199 case EXIT_PC_STALE_NOCHAIN:
6200 update_psw_addr(&dc);
6201 /* FALLTHRU */
6202 case EXIT_PC_UPDATED:
6203 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6204 cc op type is in env */
6205 update_cc_op(&dc);
6206 /* FALLTHRU */
6207 case EXIT_PC_CC_UPDATED:
6208 /* Exit the TB, either by raising a debug exception or by return. */
6209 if (do_debug) {
6210 gen_exception(EXCP_DEBUG);
6211 } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
6212 tcg_gen_exit_tb(0);
6213 } else {
6214 tcg_gen_lookup_and_goto_ptr();
6216 break;
6217 default:
6218 g_assert_not_reached();
6221 gen_tb_end(tb, num_insns);
6223 tb->size = dc.pc - pc_start;
6224 tb->icount = num_insns;
6226 #if defined(S390X_DEBUG_DISAS)
6227 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
6228 && qemu_log_in_addr_range(pc_start)) {
6229 qemu_log_lock();
6230 if (unlikely(dc.ex_value)) {
6231 /* ??? Unfortunately log_target_disas can't use host memory. */
6232 qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
6233 } else {
6234 qemu_log("IN: %s\n", lookup_symbol(pc_start));
6235 log_target_disas(cs, pc_start, dc.pc - pc_start);
6236 qemu_log("\n");
6238 qemu_log_unlock();
6240 #endif
6243 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6244 target_ulong *data)
6246 int cc_op = data[1];
6247 env->psw.addr = data[0];
6248 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6249 env->cc_op = cc_op;