s390x/tcg: add various alignment checks
[qemu/ar7.git] / target / s390x / translate.c
blob7d39ab350d53896bf43d7396b6dc367235b25822
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
45 #include "exec/log.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t ex_value;
58 uint64_t pc, next_pc;
59 uint32_t ilen;
60 enum cc_op cc_op;
61 bool singlestep_enabled;
64 /* Information carried about a condition to be evaluated. */
65 typedef struct {
66 TCGCond cond:8;
67 bool is_64;
68 bool g1;
69 bool g2;
70 union {
71 struct { TCGv_i64 a, b; } s64;
72 struct { TCGv_i32 a, b; } s32;
73 } u;
74 } DisasCompare;
76 /* is_jmp field values */
77 #define DISAS_EXCP DISAS_TARGET_0
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit[CC_OP_MAX];
81 static uint64_t inline_branch_miss[CC_OP_MAX];
82 #endif
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
91 return pc;
94 static TCGv_i64 psw_addr;
95 static TCGv_i64 psw_mask;
96 static TCGv_i64 gbea;
98 static TCGv_i32 cc_op;
99 static TCGv_i64 cc_src;
100 static TCGv_i64 cc_dst;
101 static TCGv_i64 cc_vr;
103 static char cpu_reg_names[32][4];
104 static TCGv_i64 regs[16];
105 static TCGv_i64 fregs[16];
107 void s390x_translate_init(void)
109 int i;
111 psw_addr = tcg_global_mem_new_i64(cpu_env,
112 offsetof(CPUS390XState, psw.addr),
113 "psw_addr");
114 psw_mask = tcg_global_mem_new_i64(cpu_env,
115 offsetof(CPUS390XState, psw.mask),
116 "psw_mask");
117 gbea = tcg_global_mem_new_i64(cpu_env,
118 offsetof(CPUS390XState, gbea),
119 "gbea");
121 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
122 "cc_op");
123 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
124 "cc_src");
125 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
126 "cc_dst");
127 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
128 "cc_vr");
130 for (i = 0; i < 16; i++) {
131 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
132 regs[i] = tcg_global_mem_new(cpu_env,
133 offsetof(CPUS390XState, regs[i]),
134 cpu_reg_names[i]);
137 for (i = 0; i < 16; i++) {
138 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
139 fregs[i] = tcg_global_mem_new(cpu_env,
140 offsetof(CPUS390XState, vregs[i][0].d),
141 cpu_reg_names[i + 16]);
145 static TCGv_i64 load_reg(int reg)
147 TCGv_i64 r = tcg_temp_new_i64();
148 tcg_gen_mov_i64(r, regs[reg]);
149 return r;
152 static TCGv_i64 load_freg32_i64(int reg)
154 TCGv_i64 r = tcg_temp_new_i64();
155 tcg_gen_shri_i64(r, fregs[reg], 32);
156 return r;
159 static void store_reg(int reg, TCGv_i64 v)
161 tcg_gen_mov_i64(regs[reg], v);
164 static void store_freg(int reg, TCGv_i64 v)
166 tcg_gen_mov_i64(fregs[reg], v);
169 static void store_reg32_i64(int reg, TCGv_i64 v)
171 /* 32 bit register writes keep the upper half */
172 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
175 static void store_reg32h_i64(int reg, TCGv_i64 v)
177 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
180 static void store_freg32_i64(int reg, TCGv_i64 v)
182 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
185 static void return_low128(TCGv_i64 dest)
187 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
190 static void update_psw_addr(DisasContext *s)
192 /* psw.addr */
193 tcg_gen_movi_i64(psw_addr, s->pc);
196 static void per_branch(DisasContext *s, bool to_next)
198 #ifndef CONFIG_USER_ONLY
199 tcg_gen_movi_i64(gbea, s->pc);
201 if (s->tb->flags & FLAG_MASK_PER) {
202 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
203 gen_helper_per_branch(cpu_env, gbea, next_pc);
204 if (to_next) {
205 tcg_temp_free_i64(next_pc);
208 #endif
211 static void per_branch_cond(DisasContext *s, TCGCond cond,
212 TCGv_i64 arg1, TCGv_i64 arg2)
214 #ifndef CONFIG_USER_ONLY
215 if (s->tb->flags & FLAG_MASK_PER) {
216 TCGLabel *lab = gen_new_label();
217 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
219 tcg_gen_movi_i64(gbea, s->pc);
220 gen_helper_per_branch(cpu_env, gbea, psw_addr);
222 gen_set_label(lab);
223 } else {
224 TCGv_i64 pc = tcg_const_i64(s->pc);
225 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
226 tcg_temp_free_i64(pc);
228 #endif
231 static void per_breaking_event(DisasContext *s)
233 tcg_gen_movi_i64(gbea, s->pc);
236 static void update_cc_op(DisasContext *s)
238 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
239 tcg_gen_movi_i32(cc_op, s->cc_op);
243 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
245 return (uint64_t)cpu_lduw_code(env, pc);
248 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
250 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
253 static int get_mem_index(DisasContext *s)
255 if (!(s->tb->flags & FLAG_MASK_DAT)) {
256 return MMU_REAL_IDX;
259 switch (s->tb->flags & FLAG_MASK_ASC) {
260 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
261 return MMU_PRIMARY_IDX;
262 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
263 return MMU_SECONDARY_IDX;
264 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
265 return MMU_HOME_IDX;
266 default:
267 tcg_abort();
268 break;
272 static void gen_exception(int excp)
274 TCGv_i32 tmp = tcg_const_i32(excp);
275 gen_helper_exception(cpu_env, tmp);
276 tcg_temp_free_i32(tmp);
279 static void gen_program_exception(DisasContext *s, int code)
281 TCGv_i32 tmp;
283 /* Remember what pgm exeption this was. */
284 tmp = tcg_const_i32(code);
285 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
286 tcg_temp_free_i32(tmp);
288 tmp = tcg_const_i32(s->ilen);
289 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
290 tcg_temp_free_i32(tmp);
292 /* update the psw */
293 update_psw_addr(s);
295 /* Save off cc. */
296 update_cc_op(s);
298 /* Trigger exception. */
299 gen_exception(EXCP_PGM);
302 static inline void gen_illegal_opcode(DisasContext *s)
304 gen_program_exception(s, PGM_OPERATION);
307 static inline void gen_trap(DisasContext *s)
309 TCGv_i32 t;
311 /* Set DXC to 0xff. */
312 t = tcg_temp_new_i32();
313 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
314 tcg_gen_ori_i32(t, t, 0xff00);
315 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
316 tcg_temp_free_i32(t);
318 gen_program_exception(s, PGM_DATA);
321 #ifndef CONFIG_USER_ONLY
322 static void check_privileged(DisasContext *s)
324 if (s->tb->flags & FLAG_MASK_PSTATE) {
325 gen_program_exception(s, PGM_PRIVILEGED);
328 #endif
330 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
332 TCGv_i64 tmp = tcg_temp_new_i64();
333 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
335 /* Note that d2 is limited to 20 bits, signed. If we crop negative
336 displacements early we create larger immedate addends. */
338 /* Note that addi optimizes the imm==0 case. */
339 if (b2 && x2) {
340 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
341 tcg_gen_addi_i64(tmp, tmp, d2);
342 } else if (b2) {
343 tcg_gen_addi_i64(tmp, regs[b2], d2);
344 } else if (x2) {
345 tcg_gen_addi_i64(tmp, regs[x2], d2);
346 } else {
347 if (need_31) {
348 d2 &= 0x7fffffff;
349 need_31 = false;
351 tcg_gen_movi_i64(tmp, d2);
353 if (need_31) {
354 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
357 return tmp;
360 static inline bool live_cc_data(DisasContext *s)
362 return (s->cc_op != CC_OP_DYNAMIC
363 && s->cc_op != CC_OP_STATIC
364 && s->cc_op > 3);
367 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
369 if (live_cc_data(s)) {
370 tcg_gen_discard_i64(cc_src);
371 tcg_gen_discard_i64(cc_dst);
372 tcg_gen_discard_i64(cc_vr);
374 s->cc_op = CC_OP_CONST0 + val;
377 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
379 if (live_cc_data(s)) {
380 tcg_gen_discard_i64(cc_src);
381 tcg_gen_discard_i64(cc_vr);
383 tcg_gen_mov_i64(cc_dst, dst);
384 s->cc_op = op;
387 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
388 TCGv_i64 dst)
390 if (live_cc_data(s)) {
391 tcg_gen_discard_i64(cc_vr);
393 tcg_gen_mov_i64(cc_src, src);
394 tcg_gen_mov_i64(cc_dst, dst);
395 s->cc_op = op;
398 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
399 TCGv_i64 dst, TCGv_i64 vr)
401 tcg_gen_mov_i64(cc_src, src);
402 tcg_gen_mov_i64(cc_dst, dst);
403 tcg_gen_mov_i64(cc_vr, vr);
404 s->cc_op = op;
407 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
409 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
412 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
414 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
417 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
419 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
422 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
424 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
427 /* CC value is in env->cc_op */
428 static void set_cc_static(DisasContext *s)
430 if (live_cc_data(s)) {
431 tcg_gen_discard_i64(cc_src);
432 tcg_gen_discard_i64(cc_dst);
433 tcg_gen_discard_i64(cc_vr);
435 s->cc_op = CC_OP_STATIC;
438 /* calculates cc into cc_op */
439 static void gen_op_calc_cc(DisasContext *s)
441 TCGv_i32 local_cc_op = NULL;
442 TCGv_i64 dummy = NULL;
444 switch (s->cc_op) {
445 default:
446 dummy = tcg_const_i64(0);
447 /* FALLTHRU */
448 case CC_OP_ADD_64:
449 case CC_OP_ADDU_64:
450 case CC_OP_ADDC_64:
451 case CC_OP_SUB_64:
452 case CC_OP_SUBU_64:
453 case CC_OP_SUBB_64:
454 case CC_OP_ADD_32:
455 case CC_OP_ADDU_32:
456 case CC_OP_ADDC_32:
457 case CC_OP_SUB_32:
458 case CC_OP_SUBU_32:
459 case CC_OP_SUBB_32:
460 local_cc_op = tcg_const_i32(s->cc_op);
461 break;
462 case CC_OP_CONST0:
463 case CC_OP_CONST1:
464 case CC_OP_CONST2:
465 case CC_OP_CONST3:
466 case CC_OP_STATIC:
467 case CC_OP_DYNAMIC:
468 break;
471 switch (s->cc_op) {
472 case CC_OP_CONST0:
473 case CC_OP_CONST1:
474 case CC_OP_CONST2:
475 case CC_OP_CONST3:
476 /* s->cc_op is the cc value */
477 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
478 break;
479 case CC_OP_STATIC:
480 /* env->cc_op already is the cc value */
481 break;
482 case CC_OP_NZ:
483 case CC_OP_ABS_64:
484 case CC_OP_NABS_64:
485 case CC_OP_ABS_32:
486 case CC_OP_NABS_32:
487 case CC_OP_LTGT0_32:
488 case CC_OP_LTGT0_64:
489 case CC_OP_COMP_32:
490 case CC_OP_COMP_64:
491 case CC_OP_NZ_F32:
492 case CC_OP_NZ_F64:
493 case CC_OP_FLOGR:
494 /* 1 argument */
495 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
496 break;
497 case CC_OP_ICM:
498 case CC_OP_LTGT_32:
499 case CC_OP_LTGT_64:
500 case CC_OP_LTUGTU_32:
501 case CC_OP_LTUGTU_64:
502 case CC_OP_TM_32:
503 case CC_OP_TM_64:
504 case CC_OP_SLA_32:
505 case CC_OP_SLA_64:
506 case CC_OP_NZ_F128:
507 /* 2 arguments */
508 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
509 break;
510 case CC_OP_ADD_64:
511 case CC_OP_ADDU_64:
512 case CC_OP_ADDC_64:
513 case CC_OP_SUB_64:
514 case CC_OP_SUBU_64:
515 case CC_OP_SUBB_64:
516 case CC_OP_ADD_32:
517 case CC_OP_ADDU_32:
518 case CC_OP_ADDC_32:
519 case CC_OP_SUB_32:
520 case CC_OP_SUBU_32:
521 case CC_OP_SUBB_32:
522 /* 3 arguments */
523 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
524 break;
525 case CC_OP_DYNAMIC:
526 /* unknown operation - assume 3 arguments and cc_op in env */
527 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
528 break;
529 default:
530 tcg_abort();
533 if (local_cc_op) {
534 tcg_temp_free_i32(local_cc_op);
536 if (dummy) {
537 tcg_temp_free_i64(dummy);
540 /* We now have cc in cc_op as constant */
541 set_cc_static(s);
544 static bool use_exit_tb(DisasContext *s)
546 return (s->singlestep_enabled ||
547 (tb_cflags(s->tb) & CF_LAST_IO) ||
548 (s->tb->flags & FLAG_MASK_PER));
551 static bool use_goto_tb(DisasContext *s, uint64_t dest)
553 if (unlikely(use_exit_tb(s))) {
554 return false;
556 #ifndef CONFIG_USER_ONLY
557 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
558 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
559 #else
560 return true;
561 #endif
564 static void account_noninline_branch(DisasContext *s, int cc_op)
566 #ifdef DEBUG_INLINE_BRANCHES
567 inline_branch_miss[cc_op]++;
568 #endif
571 static void account_inline_branch(DisasContext *s, int cc_op)
573 #ifdef DEBUG_INLINE_BRANCHES
574 inline_branch_hit[cc_op]++;
575 #endif
578 /* Table of mask values to comparison codes, given a comparison as input.
579 For such, CC=3 should not be possible. */
580 static const TCGCond ltgt_cond[16] = {
581 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
582 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
583 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
584 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
585 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
586 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
587 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
588 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
591 /* Table of mask values to comparison codes, given a logic op as input.
592 For such, only CC=0 and CC=1 should be possible. */
593 static const TCGCond nz_cond[16] = {
594 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
595 TCG_COND_NEVER, TCG_COND_NEVER,
596 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
597 TCG_COND_NE, TCG_COND_NE,
598 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
599 TCG_COND_EQ, TCG_COND_EQ,
600 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
601 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
604 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
605 details required to generate a TCG comparison. */
606 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
608 TCGCond cond;
609 enum cc_op old_cc_op = s->cc_op;
611 if (mask == 15 || mask == 0) {
612 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
613 c->u.s32.a = cc_op;
614 c->u.s32.b = cc_op;
615 c->g1 = c->g2 = true;
616 c->is_64 = false;
617 return;
620 /* Find the TCG condition for the mask + cc op. */
621 switch (old_cc_op) {
622 case CC_OP_LTGT0_32:
623 case CC_OP_LTGT0_64:
624 case CC_OP_LTGT_32:
625 case CC_OP_LTGT_64:
626 cond = ltgt_cond[mask];
627 if (cond == TCG_COND_NEVER) {
628 goto do_dynamic;
630 account_inline_branch(s, old_cc_op);
631 break;
633 case CC_OP_LTUGTU_32:
634 case CC_OP_LTUGTU_64:
635 cond = tcg_unsigned_cond(ltgt_cond[mask]);
636 if (cond == TCG_COND_NEVER) {
637 goto do_dynamic;
639 account_inline_branch(s, old_cc_op);
640 break;
642 case CC_OP_NZ:
643 cond = nz_cond[mask];
644 if (cond == TCG_COND_NEVER) {
645 goto do_dynamic;
647 account_inline_branch(s, old_cc_op);
648 break;
650 case CC_OP_TM_32:
651 case CC_OP_TM_64:
652 switch (mask) {
653 case 8:
654 cond = TCG_COND_EQ;
655 break;
656 case 4 | 2 | 1:
657 cond = TCG_COND_NE;
658 break;
659 default:
660 goto do_dynamic;
662 account_inline_branch(s, old_cc_op);
663 break;
665 case CC_OP_ICM:
666 switch (mask) {
667 case 8:
668 cond = TCG_COND_EQ;
669 break;
670 case 4 | 2 | 1:
671 case 4 | 2:
672 cond = TCG_COND_NE;
673 break;
674 default:
675 goto do_dynamic;
677 account_inline_branch(s, old_cc_op);
678 break;
680 case CC_OP_FLOGR:
681 switch (mask & 0xa) {
682 case 8: /* src == 0 -> no one bit found */
683 cond = TCG_COND_EQ;
684 break;
685 case 2: /* src != 0 -> one bit found */
686 cond = TCG_COND_NE;
687 break;
688 default:
689 goto do_dynamic;
691 account_inline_branch(s, old_cc_op);
692 break;
694 case CC_OP_ADDU_32:
695 case CC_OP_ADDU_64:
696 switch (mask) {
697 case 8 | 2: /* vr == 0 */
698 cond = TCG_COND_EQ;
699 break;
700 case 4 | 1: /* vr != 0 */
701 cond = TCG_COND_NE;
702 break;
703 case 8 | 4: /* no carry -> vr >= src */
704 cond = TCG_COND_GEU;
705 break;
706 case 2 | 1: /* carry -> vr < src */
707 cond = TCG_COND_LTU;
708 break;
709 default:
710 goto do_dynamic;
712 account_inline_branch(s, old_cc_op);
713 break;
715 case CC_OP_SUBU_32:
716 case CC_OP_SUBU_64:
717 /* Note that CC=0 is impossible; treat it as dont-care. */
718 switch (mask & 7) {
719 case 2: /* zero -> op1 == op2 */
720 cond = TCG_COND_EQ;
721 break;
722 case 4 | 1: /* !zero -> op1 != op2 */
723 cond = TCG_COND_NE;
724 break;
725 case 4: /* borrow (!carry) -> op1 < op2 */
726 cond = TCG_COND_LTU;
727 break;
728 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
729 cond = TCG_COND_GEU;
730 break;
731 default:
732 goto do_dynamic;
734 account_inline_branch(s, old_cc_op);
735 break;
737 default:
738 do_dynamic:
739 /* Calculate cc value. */
740 gen_op_calc_cc(s);
741 /* FALLTHRU */
743 case CC_OP_STATIC:
744 /* Jump based on CC. We'll load up the real cond below;
745 the assignment here merely avoids a compiler warning. */
746 account_noninline_branch(s, old_cc_op);
747 old_cc_op = CC_OP_STATIC;
748 cond = TCG_COND_NEVER;
749 break;
752 /* Load up the arguments of the comparison. */
753 c->is_64 = true;
754 c->g1 = c->g2 = false;
755 switch (old_cc_op) {
756 case CC_OP_LTGT0_32:
757 c->is_64 = false;
758 c->u.s32.a = tcg_temp_new_i32();
759 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
760 c->u.s32.b = tcg_const_i32(0);
761 break;
762 case CC_OP_LTGT_32:
763 case CC_OP_LTUGTU_32:
764 case CC_OP_SUBU_32:
765 c->is_64 = false;
766 c->u.s32.a = tcg_temp_new_i32();
767 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
768 c->u.s32.b = tcg_temp_new_i32();
769 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
770 break;
772 case CC_OP_LTGT0_64:
773 case CC_OP_NZ:
774 case CC_OP_FLOGR:
775 c->u.s64.a = cc_dst;
776 c->u.s64.b = tcg_const_i64(0);
777 c->g1 = true;
778 break;
779 case CC_OP_LTGT_64:
780 case CC_OP_LTUGTU_64:
781 case CC_OP_SUBU_64:
782 c->u.s64.a = cc_src;
783 c->u.s64.b = cc_dst;
784 c->g1 = c->g2 = true;
785 break;
787 case CC_OP_TM_32:
788 case CC_OP_TM_64:
789 case CC_OP_ICM:
790 c->u.s64.a = tcg_temp_new_i64();
791 c->u.s64.b = tcg_const_i64(0);
792 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
793 break;
795 case CC_OP_ADDU_32:
796 c->is_64 = false;
797 c->u.s32.a = tcg_temp_new_i32();
798 c->u.s32.b = tcg_temp_new_i32();
799 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
800 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
801 tcg_gen_movi_i32(c->u.s32.b, 0);
802 } else {
803 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
805 break;
807 case CC_OP_ADDU_64:
808 c->u.s64.a = cc_vr;
809 c->g1 = true;
810 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
811 c->u.s64.b = tcg_const_i64(0);
812 } else {
813 c->u.s64.b = cc_src;
814 c->g2 = true;
816 break;
818 case CC_OP_STATIC:
819 c->is_64 = false;
820 c->u.s32.a = cc_op;
821 c->g1 = true;
822 switch (mask) {
823 case 0x8 | 0x4 | 0x2: /* cc != 3 */
824 cond = TCG_COND_NE;
825 c->u.s32.b = tcg_const_i32(3);
826 break;
827 case 0x8 | 0x4 | 0x1: /* cc != 2 */
828 cond = TCG_COND_NE;
829 c->u.s32.b = tcg_const_i32(2);
830 break;
831 case 0x8 | 0x2 | 0x1: /* cc != 1 */
832 cond = TCG_COND_NE;
833 c->u.s32.b = tcg_const_i32(1);
834 break;
835 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
836 cond = TCG_COND_EQ;
837 c->g1 = false;
838 c->u.s32.a = tcg_temp_new_i32();
839 c->u.s32.b = tcg_const_i32(0);
840 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
841 break;
842 case 0x8 | 0x4: /* cc < 2 */
843 cond = TCG_COND_LTU;
844 c->u.s32.b = tcg_const_i32(2);
845 break;
846 case 0x8: /* cc == 0 */
847 cond = TCG_COND_EQ;
848 c->u.s32.b = tcg_const_i32(0);
849 break;
850 case 0x4 | 0x2 | 0x1: /* cc != 0 */
851 cond = TCG_COND_NE;
852 c->u.s32.b = tcg_const_i32(0);
853 break;
854 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
855 cond = TCG_COND_NE;
856 c->g1 = false;
857 c->u.s32.a = tcg_temp_new_i32();
858 c->u.s32.b = tcg_const_i32(0);
859 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
860 break;
861 case 0x4: /* cc == 1 */
862 cond = TCG_COND_EQ;
863 c->u.s32.b = tcg_const_i32(1);
864 break;
865 case 0x2 | 0x1: /* cc > 1 */
866 cond = TCG_COND_GTU;
867 c->u.s32.b = tcg_const_i32(1);
868 break;
869 case 0x2: /* cc == 2 */
870 cond = TCG_COND_EQ;
871 c->u.s32.b = tcg_const_i32(2);
872 break;
873 case 0x1: /* cc == 3 */
874 cond = TCG_COND_EQ;
875 c->u.s32.b = tcg_const_i32(3);
876 break;
877 default:
878 /* CC is masked by something else: (8 >> cc) & mask. */
879 cond = TCG_COND_NE;
880 c->g1 = false;
881 c->u.s32.a = tcg_const_i32(8);
882 c->u.s32.b = tcg_const_i32(0);
883 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
884 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
885 break;
887 break;
889 default:
890 abort();
892 c->cond = cond;
895 static void free_compare(DisasCompare *c)
897 if (!c->g1) {
898 if (c->is_64) {
899 tcg_temp_free_i64(c->u.s64.a);
900 } else {
901 tcg_temp_free_i32(c->u.s32.a);
904 if (!c->g2) {
905 if (c->is_64) {
906 tcg_temp_free_i64(c->u.s64.b);
907 } else {
908 tcg_temp_free_i32(c->u.s32.b);
913 /* ====================================================================== */
914 /* Define the insn format enumeration. */
915 #define F0(N) FMT_##N,
916 #define F1(N, X1) F0(N)
917 #define F2(N, X1, X2) F0(N)
918 #define F3(N, X1, X2, X3) F0(N)
919 #define F4(N, X1, X2, X3, X4) F0(N)
920 #define F5(N, X1, X2, X3, X4, X5) F0(N)
922 typedef enum {
923 #include "insn-format.def"
924 } DisasFormat;
926 #undef F0
927 #undef F1
928 #undef F2
929 #undef F3
930 #undef F4
931 #undef F5
933 /* Define a structure to hold the decoded fields. We'll store each inside
934 an array indexed by an enum. In order to conserve memory, we'll arrange
935 for fields that do not exist at the same time to overlap, thus the "C"
936 for compact. For checking purposes there is an "O" for original index
937 as well that will be applied to availability bitmaps. */
/* "Original" field index, used to key availability bitmaps.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* "Compact" field index: fields that never coexist share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
993 struct DisasFields {
994 uint64_t raw_insn;
995 unsigned op:8;
996 unsigned op2:8;
997 unsigned presentC:16;
998 unsigned int presentO;
999 int c[NUM_C_FIELD];
1002 /* This is the way fields are to be accessed out of DisasFields. */
1003 #define have_field(S, F) have_field1((S), FLD_O_##F)
1004 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1006 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1008 return (f->presentO >> c) & 1;
1011 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1012 enum DisasFieldIndexC c)
1014 assert(have_field1(f, o));
1015 return f->c[c];
1018 /* Describe the layout of each field in each format. */
1019 typedef struct DisasField {
1020 unsigned int beg:8;
1021 unsigned int size:8;
1022 unsigned int type:2;
1023 unsigned int indexC:6;
1024 enum DisasFieldIndexO indexO:8;
1025 } DisasField;
1027 typedef struct DisasFormatInfo {
1028 DisasField op[NUM_C_FIELD];
1029 } DisasFormatInfo;
1031 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1032 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1033 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1035 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1036 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1037 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1038 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1039 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1040 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1041 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1042 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1043 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1044 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1046 #define F0(N) { { } },
1047 #define F1(N, X1) { { X1 } },
1048 #define F2(N, X1, X2) { { X1, X2 } },
1049 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1050 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1051 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1053 static const DisasFormatInfo format_info[] = {
1054 #include "insn-format.def"
1057 #undef F0
1058 #undef F1
1059 #undef F2
1060 #undef F3
1061 #undef F4
1062 #undef F5
1063 #undef R
1064 #undef M
1065 #undef BD
1066 #undef BXD
1067 #undef BDL
1068 #undef BXDL
1069 #undef I
1070 #undef L
1072 /* Generally, we'll extract operands into this structures, operate upon
1073 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1074 of routines below for more details. */
1075 typedef struct {
1076 bool g_out, g_out2, g_in1, g_in2;
1077 TCGv_i64 out, out2, in1, in2;
1078 TCGv_i64 addr1;
1079 } DisasOps;
1081 /* Instructions can place constraints on their operands, raising specification
1082 exceptions if they are violated. To make this easy to automate, each "in1",
1083 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1084 of the following, or 0. To make this easy to document, we'll put the
1085 SPEC_<name> defines next to <name>. */
1087 #define SPEC_r1_even 1
1088 #define SPEC_r2_even 2
1089 #define SPEC_r3_even 4
1090 #define SPEC_r1_f128 8
1091 #define SPEC_r2_f128 16
1093 /* Return values from translate_one, indicating the state of the TB. */
1094 typedef enum {
1095 /* Continue the TB. */
1096 NO_EXIT,
1097 /* We have emitted one or more goto_tb. No fixup required. */
1098 EXIT_GOTO_TB,
1099 /* We are not using a goto_tb (for whatever reason), but have updated
1100 the PC (for whatever reason), so there's no need to do it again on
1101 exiting the TB. */
1102 EXIT_PC_UPDATED,
1103 /* We have updated the PC and CC values. */
1104 EXIT_PC_CC_UPDATED,
1105 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1106 updated the PC for the next instruction to be executed. */
1107 EXIT_PC_STALE,
1108 /* We are exiting the TB to the main loop. */
1109 EXIT_PC_STALE_NOCHAIN,
1110 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1111 No following code will be executed. */
1112 EXIT_NORETURN,
1113 } ExitStatus;
1115 struct DisasInsn {
1116 unsigned opc:16;
1117 DisasFormat fmt:8;
1118 unsigned fac:8;
1119 unsigned spec:8;
1121 const char *name;
1123 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1124 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1125 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1126 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1127 void (*help_cout)(DisasContext *, DisasOps *);
1128 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1130 uint64_t data;
1133 /* ====================================================================== */
1134 /* Miscellaneous helpers, used by several operations. */
1136 static void help_l2_shift(DisasContext *s, DisasFields *f,
1137 DisasOps *o, int mask)
1139 int b2 = get_field(f, b2);
1140 int d2 = get_field(f, d2);
1142 if (b2 == 0) {
1143 o->in2 = tcg_const_i64(d2 & mask);
1144 } else {
1145 o->in2 = get_address(s, 0, b2, d2);
1146 tcg_gen_andi_i64(o->in2, o->in2, mask);
1150 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1152 if (dest == s->next_pc) {
1153 per_branch(s, true);
1154 return NO_EXIT;
1156 if (use_goto_tb(s, dest)) {
1157 update_cc_op(s);
1158 per_breaking_event(s);
1159 tcg_gen_goto_tb(0);
1160 tcg_gen_movi_i64(psw_addr, dest);
1161 tcg_gen_exit_tb((uintptr_t)s->tb);
1162 return EXIT_GOTO_TB;
1163 } else {
1164 tcg_gen_movi_i64(psw_addr, dest);
1165 per_branch(s, false);
1166 return EXIT_PC_UPDATED;
1170 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1171 bool is_imm, int imm, TCGv_i64 cdest)
1173 ExitStatus ret;
1174 uint64_t dest = s->pc + 2 * imm;
1175 TCGLabel *lab;
1177 /* Take care of the special cases first. */
1178 if (c->cond == TCG_COND_NEVER) {
1179 ret = NO_EXIT;
1180 goto egress;
1182 if (is_imm) {
1183 if (dest == s->next_pc) {
1184 /* Branch to next. */
1185 per_branch(s, true);
1186 ret = NO_EXIT;
1187 goto egress;
1189 if (c->cond == TCG_COND_ALWAYS) {
1190 ret = help_goto_direct(s, dest);
1191 goto egress;
1193 } else {
1194 if (!cdest) {
1195 /* E.g. bcr %r0 -> no branch. */
1196 ret = NO_EXIT;
1197 goto egress;
1199 if (c->cond == TCG_COND_ALWAYS) {
1200 tcg_gen_mov_i64(psw_addr, cdest);
1201 per_branch(s, false);
1202 ret = EXIT_PC_UPDATED;
1203 goto egress;
1207 if (use_goto_tb(s, s->next_pc)) {
1208 if (is_imm && use_goto_tb(s, dest)) {
1209 /* Both exits can use goto_tb. */
1210 update_cc_op(s);
1212 lab = gen_new_label();
1213 if (c->is_64) {
1214 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1215 } else {
1216 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1219 /* Branch not taken. */
1220 tcg_gen_goto_tb(0);
1221 tcg_gen_movi_i64(psw_addr, s->next_pc);
1222 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1224 /* Branch taken. */
1225 gen_set_label(lab);
1226 per_breaking_event(s);
1227 tcg_gen_goto_tb(1);
1228 tcg_gen_movi_i64(psw_addr, dest);
1229 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1231 ret = EXIT_GOTO_TB;
1232 } else {
1233 /* Fallthru can use goto_tb, but taken branch cannot. */
1234 /* Store taken branch destination before the brcond. This
1235 avoids having to allocate a new local temp to hold it.
1236 We'll overwrite this in the not taken case anyway. */
1237 if (!is_imm) {
1238 tcg_gen_mov_i64(psw_addr, cdest);
1241 lab = gen_new_label();
1242 if (c->is_64) {
1243 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1244 } else {
1245 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1248 /* Branch not taken. */
1249 update_cc_op(s);
1250 tcg_gen_goto_tb(0);
1251 tcg_gen_movi_i64(psw_addr, s->next_pc);
1252 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1254 gen_set_label(lab);
1255 if (is_imm) {
1256 tcg_gen_movi_i64(psw_addr, dest);
1258 per_breaking_event(s);
1259 ret = EXIT_PC_UPDATED;
1261 } else {
1262 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1263 Most commonly we're single-stepping or some other condition that
1264 disables all use of goto_tb. Just update the PC and exit. */
1266 TCGv_i64 next = tcg_const_i64(s->next_pc);
1267 if (is_imm) {
1268 cdest = tcg_const_i64(dest);
1271 if (c->is_64) {
1272 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1273 cdest, next);
1274 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1275 } else {
1276 TCGv_i32 t0 = tcg_temp_new_i32();
1277 TCGv_i64 t1 = tcg_temp_new_i64();
1278 TCGv_i64 z = tcg_const_i64(0);
1279 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1280 tcg_gen_extu_i32_i64(t1, t0);
1281 tcg_temp_free_i32(t0);
1282 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1283 per_branch_cond(s, TCG_COND_NE, t1, z);
1284 tcg_temp_free_i64(t1);
1285 tcg_temp_free_i64(z);
1288 if (is_imm) {
1289 tcg_temp_free_i64(cdest);
1291 tcg_temp_free_i64(next);
1293 ret = EXIT_PC_UPDATED;
1296 egress:
1297 free_compare(c);
1298 return ret;
1301 /* ====================================================================== */
1302 /* The operations. These perform the bulk of the work for any insn,
1303 usually after the operands have been loaded and output initialized. */
1305 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1307 TCGv_i64 z, n;
1308 z = tcg_const_i64(0);
1309 n = tcg_temp_new_i64();
1310 tcg_gen_neg_i64(n, o->in2);
1311 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1312 tcg_temp_free_i64(n);
1313 tcg_temp_free_i64(z);
1314 return NO_EXIT;
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
/* LOAD POSITIVE (extended BFP): clear the sign bit in the high half
   and pass the low half through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* ADD: out = in1 + in2; CC handling is done by the insn's cc hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD WITH CARRY: out = in1 + in2 + carry, where the carry is
   extracted from the current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the boolean result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* ADD IMMEDIATE to storage (ASI et al).  With the STFLE.45 facility
   the update is done as one atomic read-modify-write; otherwise as a
   plain load / add / store sequence.  Either way the sum is recomputed
   so CC can be set from it.  */
static ExitStatus op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}
/* ADD (short BFP): defer to the aeb helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD (long BFP): defer to the adb helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD (extended BFP): 128-bit operands are passed as two 64-bit
   halves; the low half of the result comes back via retxl.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND IMMEDIATE into one field of the register (NIHH et al).
   s->insn->data packs (field size << 8) | shift; bits outside the
   field are preserved by OR-ing ~mask into the shifted immediate.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* AND to storage (NI et al).  With the interlocked-access facility 2
   the update is a single atomic fetch-and; otherwise a plain
   load / and / store.  The result is recomputed for CC either way.  */
static ExitStatus op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}
/* BRANCH AND SAVE: store the link information into r1, then branch
   to the target.  o->in2 is NULL when the target register is 0, in
   which case no branch is taken.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
/* BRANCH RELATIVE AND SAVE: link into r1, then take a direct branch
   to pc + 2 * i2 (i2 counts halfwords).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* BRANCH ON CONDITION (BC/BCR and relatives).  A BCR with R2 = 0 is
   a no-op branch but may act as a serialization point depending on
   the mask.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1497 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1499 int r1 = get_field(s->fields, r1);
1500 bool is_imm = have_field(s->fields, i2);
1501 int imm = is_imm ? get_field(s->fields, i2) : 0;
1502 DisasCompare c;
1503 TCGv_i64 t;
1505 c.cond = TCG_COND_NE;
1506 c.is_64 = false;
1507 c.g1 = false;
1508 c.g2 = false;
1510 t = tcg_temp_new_i64();
1511 tcg_gen_subi_i64(t, regs[r1], 1);
1512 store_reg32_i64(r1, t);
1513 c.u.s32.a = tcg_temp_new_i32();
1514 c.u.s32.b = tcg_const_i32(0);
1515 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1516 tcg_temp_free_i64(t);
1518 return help_branch(s, &c, is_imm, imm, o->in2);
1521 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1523 int r1 = get_field(s->fields, r1);
1524 int imm = get_field(s->fields, i2);
1525 DisasCompare c;
1526 TCGv_i64 t;
1528 c.cond = TCG_COND_NE;
1529 c.is_64 = false;
1530 c.g1 = false;
1531 c.g2 = false;
1533 t = tcg_temp_new_i64();
1534 tcg_gen_shri_i64(t, regs[r1], 32);
1535 tcg_gen_subi_i64(t, t, 1);
1536 store_reg32h_i64(r1, t);
1537 c.u.s32.a = tcg_temp_new_i32();
1538 c.u.s32.b = tcg_const_i32(0);
1539 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1540 tcg_temp_free_i64(t);
1542 return help_branch(s, &c, 1, imm, o->in2);
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch while
   it is non-zero.  The comparison reads the global register directly
   (g1 = true), so no copy is made.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXH/BXLE): add r3 to r1, compare the sum
   against the odd register of the r3 pair.  s->insn->data selects
   LE (branch-on-low-or-equal) vs GT (branch-on-high).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is r3 if r3 is odd, else r3 + 1.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG).  As op_bx32, but r1 is
   updated in place.  If r1 aliases the comparand register we must
   copy it first, since the addition would clobber it.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH (CRJ et al): compare in1 with in2 using the
   condition selected by m3 (unsigned when s->insn->data is set), and
   branch to either the immediate i4 or the b4/d4 address.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* COMPARE (short BFP): the helper returns CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE (long BFP): the helper returns CC directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE (extended BFP): 128-bit operands as 64-bit halves; the
   helper returns CC directly.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32-bit from short BFP); the m3 field is passed
   through to the helper, CC derives from the source value.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32-bit from long BFP).  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32-bit from extended BFP).  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64-bit from short BFP).  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64-bit from long BFP).  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64-bit from extended BFP).  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (32-bit from short BFP).  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (32-bit from long BFP).  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (32-bit from extended BFP).  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (64-bit from short BFP).  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (64-bit from long BFP).  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (64-bit from extended BFP).  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (short BFP from 64-bit int); no CC change.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (long BFP from 64-bit int); no CC change.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (extended BFP from 64-bit int); the low half of
   the 128-bit result comes back via retxl.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (short BFP from 64-bit uint); no CC change.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (long BFP from 64-bit uint); no CC change.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (extended BFP from 64-bit uint); the low half
   of the 128-bit result comes back via retxl.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM: the helper returns the number of bytes processed; the
   second-operand address (r2) is advanced by that amount and its
   length (r2 + 1) reduced accordingly.  CC comes from the helper.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (storage-storage, CLC).  The common power-of-two
   lengths are inlined as a pair of loads plus an unsigned-compare CC;
   any other length goes through the helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    /* l is the length minus one, hence the +1 below.  */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1870 static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
1872 int r1 = get_field(s->fields, r1);
1873 int r2 = get_field(s->fields, r2);
1874 TCGv_i32 t1, t2;
1876 /* r1 and r2 must be even. */
1877 if (r1 & 1 || r2 & 1) {
1878 gen_program_exception(s, PGM_SPECIFICATION);
1879 return EXIT_NORETURN;
1882 t1 = tcg_const_i32(r1);
1883 t2 = tcg_const_i32(r2);
1884 gen_helper_clcl(cc_op, cpu_env, t1, t2);
1885 tcg_temp_free_i32(t1);
1886 tcg_temp_free_i32(t2);
1887 set_cc_static(s);
1888 return NO_EXIT;
1891 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1893 int r1 = get_field(s->fields, r1);
1894 int r3 = get_field(s->fields, r3);
1895 TCGv_i32 t1, t3;
1897 /* r1 and r3 must be even. */
1898 if (r1 & 1 || r3 & 1) {
1899 gen_program_exception(s, PGM_SPECIFICATION);
1900 return EXIT_NORETURN;
1903 t1 = tcg_const_i32(r1);
1904 t3 = tcg_const_i32(r3);
1905 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1906 tcg_temp_free_i32(t1);
1907 tcg_temp_free_i32(t3);
1908 set_cc_static(s);
1909 return NO_EXIT;
1912 static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
1914 int r1 = get_field(s->fields, r1);
1915 int r3 = get_field(s->fields, r3);
1916 TCGv_i32 t1, t3;
1918 /* r1 and r3 must be even. */
1919 if (r1 & 1 || r3 & 1) {
1920 gen_program_exception(s, PGM_SPECIFICATION);
1921 return EXIT_NORETURN;
1924 t1 = tcg_const_i32(r1);
1925 t3 = tcg_const_i32(r3);
1926 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
1927 tcg_temp_free_i32(t1);
1928 tcg_temp_free_i32(t3);
1929 set_cc_static(s);
1930 return NO_EXIT;
1933 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1935 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1936 TCGv_i32 t1 = tcg_temp_new_i32();
1937 tcg_gen_extrl_i64_i32(t1, o->in1);
1938 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1939 set_cc_static(s);
1940 tcg_temp_free_i32(t1);
1941 tcg_temp_free_i32(m3);
1942 return NO_EXIT;
/* COMPARE LOGICAL STRING: regs[0] carries the terminating character;
   the helper returns updated addresses (second via retxl) and CC.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1953 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1955 TCGv_i64 t = tcg_temp_new_i64();
1956 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1957 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1958 tcg_gen_or_i64(o->out, o->out, t);
1959 tcg_temp_free_i64(t);
1960 return NO_EXIT;
/* COMPARE AND SWAP (CS/CSY/CSG): aligned atomic cmpxchg; CC is 0 when
   the exchange happened (memory matched the expected value), else 1.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (CDSG, 128-bit): handled entirely in the
   helper; the parallel variant is used under CF_PARALLEL so the
   16-byte access stays atomic with respect to other vCPUs.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE AND SWAP AND STORE: delegated to the helper; the parallel
   variant is chosen under CF_PARALLEL.  CC comes from the helper.  */
static ExitStatus op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): aligned atomic cmpxchg of
   the operand size in s->insn->data; when the swap succeeds and bit 63
   of R2 is set, the TLB is purged on all CPUs.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Force natural alignment of the address for the atomic access.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return NO_EXIT;
}
#endif
2076 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2078 TCGv_i64 t1 = tcg_temp_new_i64();
2079 TCGv_i32 t2 = tcg_temp_new_i32();
2080 tcg_gen_extrl_i64_i32(t2, o->in1);
2081 gen_helper_cvd(t1, t2);
2082 tcg_temp_free_i32(t2);
2083 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2084 tcg_temp_free_i64(t1);
2085 return NO_EXIT;
2088 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2090 int m3 = get_field(s->fields, m3);
2091 TCGLabel *lab = gen_new_label();
2092 TCGCond c;
2094 c = tcg_invert_cond(ltgt_cond[m3]);
2095 if (s->insn->data) {
2096 c = tcg_unsigned_cond(c);
2098 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2100 /* Trap. */
2101 gen_trap(s);
2103 gen_set_label(lab);
2104 return NO_EXIT;
/* CONVERT UTF-x TO UTF-y (CU12/CU14/CU21/CU24/CU41/CU42), encoded in
   s->insn->data as source*10 + destination.  Both register pairs must
   be even; the m3 well-formedness check is only honored with the
   ETF3-enhancement facility.  */
static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): the function code from i2 selects the
   hypercall-like operation performed by the helper.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* DIVIDE (32-bit signed): results are delivered through the helper
   return value (out2) and the low-128 scratch (out).  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE LOGICAL (32-bit unsigned); result delivery as op_divs32.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE (64-bit signed); result delivery as op_divs32.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE LOGICAL (64-bit unsigned): the 128-bit dividend is passed
   as out:out2.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE (short BFP): defer to the deb helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* DIVIDE (long BFP): defer to the ddb helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* DIVIDE (extended BFP): 128-bit operands as 64-bit halves; low half
   of the result comes back via retxl.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS: copy access register r2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
/* EXTRACT CPU ATTRIBUTE: we expose no cache topology, so return
   all-ones.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: store the high half of the PSW mask into r1 and, when
   r2 is non-zero, the low half into r2.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: run the single instruction at the operand address, with
   the r1 modification applied by the helper.  PSW address and CC must
   be committed before the helper call, since it re-enters the
   translator.  Nested EXECUTE is rejected.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 means "no modification"; use a zero constant then.  */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}
/* LOAD FP INTEGER (short BFP): round to integer via helper; m3 is
   passed through.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (long BFP).  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (extended BFP): low half of the 128-bit result
   comes back via retxl.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE: R1 = bit position of the leftmost one (64 when
   the input is zero); R1+1 = input with that bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2336 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2338 int m3 = get_field(s->fields, m3);
2339 int pos, len, base = s->insn->data;
2340 TCGv_i64 tmp = tcg_temp_new_i64();
2341 uint64_t ccm;
2343 switch (m3) {
2344 case 0xf:
2345 /* Effectively a 32-bit load. */
2346 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2347 len = 32;
2348 goto one_insert;
2350 case 0xc:
2351 case 0x6:
2352 case 0x3:
2353 /* Effectively a 16-bit load. */
2354 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2355 len = 16;
2356 goto one_insert;
2358 case 0x8:
2359 case 0x4:
2360 case 0x2:
2361 case 0x1:
2362 /* Effectively an 8-bit load. */
2363 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2364 len = 8;
2365 goto one_insert;
2367 one_insert:
2368 pos = base + ctz32(m3) * 8;
2369 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2370 ccm = ((1ull << len) - 1) << pos;
2371 break;
2373 default:
2374 /* This is going to be a sequence of loads and inserts. */
2375 pos = base + 32 - 8;
2376 ccm = 0;
2377 while (m3) {
2378 if (m3 & 0x8) {
2379 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2380 tcg_gen_addi_i64(o->in2, o->in2, 1);
2381 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2382 ccm |= 0xff << pos;
2384 m3 = (m3 << 1) & 0xf;
2385 pos -= 8;
2387 break;
2390 tcg_gen_movi_i64(tmp, ccm);
2391 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2392 tcg_temp_free_i64(tmp);
2393 return NO_EXIT;
/* Insert immediate into a register field: s->insn->data packs
   (field size << 8) | shift, used directly by deposit.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK: clear the 0xff000000 byte of r1's low word,
   then OR in the program mask extracted from the PSW and the current
   condition code.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY (privileged): m4 is only honored when
   the local-TLB-clearing facility is installed, otherwise forced 0.  */
static ExitStatus op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}
/* INVALIDATE PAGE TABLE ENTRY (privileged): m4 handling as op_idte.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}
/* INSERT STORAGE KEY EXTENDED (privileged).  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* Message-security-assist dispatch (KM, KMC, KMAC, KIMD, ...).
   s->insn->data carries the feature type; validate the register-pair
   operands required by each type (the checks accumulate through the
   fall-throughs), then defer to the msa helper.  */
static ExitStatus op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        /* r3 must be an even-numbered, non-zero register.  */
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        /* r1 must be an even-numbered, non-zero register.  */
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        /* r2 must be an even-numbered, non-zero register.  */
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return NO_EXIT;
}
2514 static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2516 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2517 set_cc_static(s);
2518 return NO_EXIT;
2521 static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
2523 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2524 set_cc_static(s);
2525 return NO_EXIT;
2528 static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
2530 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2531 set_cc_static(s);
2532 return NO_EXIT;
/* LOAD AND ADD: atomically add in1 to the memory operand.  */
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory; the atomic op
       returns it in o->in2 (the store to r1 happens elsewhere).  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND AND: atomically AND in1 into the memory operand.  */
static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory; the atomic op
       returns it in o->in2.  (Not "the addition" -- this is the AND
       variant of the LOAD AND ... family.)  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND OR: atomically OR in1 into the memory operand.  */
static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory; the atomic op
       returns it in o->in2.  (Not "the addition" -- this is the OR
       variant of the LOAD AND ... family.)  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND EXCLUSIVE OR: atomically XOR in1 into the memory operand.  */
static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory; the atomic op
       returns it in o->in2.  (Not "the addition" -- this is the XOR
       variant of the LOAD AND ... family.)  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2579 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2581 gen_helper_ldeb(o->out, cpu_env, o->in2);
2582 return NO_EXIT;
2585 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2587 gen_helper_ledb(o->out, cpu_env, o->in2);
2588 return NO_EXIT;
2591 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2593 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2594 return NO_EXIT;
2597 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2599 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2600 return NO_EXIT;
2603 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2605 gen_helper_lxdb(o->out, cpu_env, o->in2);
2606 return_low128(o->out2);
2607 return NO_EXIT;
2610 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2612 gen_helper_lxeb(o->out, cpu_env, o->in2);
2613 return_low128(o->out2);
2614 return NO_EXIT;
2617 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2619 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2620 return NO_EXIT;
2623 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2625 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2626 return NO_EXIT;
2629 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2631 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2632 return NO_EXIT;
2635 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2637 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2638 return NO_EXIT;
2641 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2643 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2644 return NO_EXIT;
2647 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2649 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2650 return NO_EXIT;
2653 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2655 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2656 return NO_EXIT;
2659 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2661 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2662 return NO_EXIT;
/* LOAD AND TRAP (32-bit): store the loaded word into r1, then raise a
   data exception if the value is zero.  */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2676 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2678 TCGLabel *lab = gen_new_label();
2679 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2680 /* The value is stored even in case of trap. */
2681 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2682 gen_trap(s);
2683 gen_set_label(lab);
2684 return NO_EXIT;
2687 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2689 TCGLabel *lab = gen_new_label();
2690 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2691 /* The value is stored even in case of trap. */
2692 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2693 gen_trap(s);
2694 gen_set_label(lab);
2695 return NO_EXIT;
2698 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2700 TCGLabel *lab = gen_new_label();
2701 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2702 /* The value is stored even in case of trap. */
2703 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2704 gen_trap(s);
2705 gen_set_label(lab);
2706 return NO_EXIT;
2709 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2711 TCGLabel *lab = gen_new_label();
2712 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2713 /* The value is stored even in case of trap. */
2714 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2715 gen_trap(s);
2716 gen_set_label(lab);
2717 return NO_EXIT;
/* LOAD ON CONDITION: out = condition(m3) ? in2 : in1, where the
   condition is derived from the current CC state by disas_jcc.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        /* The comparison operands are already 64-bit; select directly.  */
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        /* Materialize the 32-bit comparison as a 0/1 value ...  */
        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        /* ... widen it to 64 bits ...  */
        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        /* ... and select against zero.  */
        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2750 #ifndef CONFIG_USER_ONLY
2751 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2753 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2754 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2755 check_privileged(s);
2756 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2757 tcg_temp_free_i32(r1);
2758 tcg_temp_free_i32(r3);
2759 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2760 return EXIT_PC_STALE_NOCHAIN;
2763 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2765 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2766 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2767 check_privileged(s);
2768 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2769 tcg_temp_free_i32(r1);
2770 tcg_temp_free_i32(r3);
2771 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2772 return EXIT_PC_STALE_NOCHAIN;
2775 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2777 check_privileged(s);
2778 gen_helper_lra(o->out, cpu_env, o->in2);
2779 set_cc_static(s);
2780 return NO_EXIT;
2783 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2785 check_privileged(s);
2787 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2788 return NO_EXIT;
2791 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2793 TCGv_i64 t1, t2;
2795 check_privileged(s);
2796 per_breaking_event(s);
2798 t1 = tcg_temp_new_i64();
2799 t2 = tcg_temp_new_i64();
2800 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2801 tcg_gen_addi_i64(o->in2, o->in2, 4);
2802 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2803 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2804 tcg_gen_shli_i64(t1, t1, 32);
2805 gen_helper_load_psw(cpu_env, t1, t2);
2806 tcg_temp_free_i64(t1);
2807 tcg_temp_free_i64(t2);
2808 return EXIT_NORETURN;
2811 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2813 TCGv_i64 t1, t2;
2815 check_privileged(s);
2816 per_breaking_event(s);
2818 t1 = tcg_temp_new_i64();
2819 t2 = tcg_temp_new_i64();
2820 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2821 tcg_gen_addi_i64(o->in2, o->in2, 8);
2822 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2823 gen_helper_load_psw(cpu_env, t1, t2);
2824 tcg_temp_free_i64(t1);
2825 tcg_temp_free_i64(t2);
2826 return EXIT_NORETURN;
2828 #endif
2830 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2832 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2833 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2834 gen_helper_lam(cpu_env, r1, o->in2, r3);
2835 tcg_temp_free_i32(r1);
2836 tcg_temp_free_i32(r3);
2837 return NO_EXIT;
/* LOAD MULTIPLE (32-bit): load registers r1 through r3 (register
   numbers wrap modulo 16) from consecutive words at the second
   operand.  First and last are loaded up front so that any page fault
   occurs before architectural state is modified.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE HIGH: like op_lm32, but the loaded words go into the
   high halves of registers r1..r3 (wrapping modulo 16).  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE (64-bit, LMG): load registers r1..r3 (wrapping modulo
   16) from consecutive doublewords.  First and last are loaded before
   any register is written, so a page fault leaves state intact.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD PAIR DISJOINT: load two operands at distinct addresses as if
   interlocked.  We cannot express a two-address atomic load in TCG,
   so under CF_PARALLEL we stop the world and re-execute serially.  */
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* LOAD PAIR FROM QUADWORD: 16-byte load into an even/odd register
   pair; the parallel variant provides the required atomicity.  */
static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    }
    /* The low 64 bits of the quadword come back via the retxl slot.  */
    return_low128(o->out2);
    return NO_EXIT;
}
3010 #ifndef CONFIG_USER_ONLY
3011 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
3013 check_privileged(s);
3014 gen_helper_lura(o->out, cpu_env, o->in2);
3015 return NO_EXIT;
3018 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
3020 check_privileged(s);
3021 gen_helper_lurag(o->out, cpu_env, o->in2);
3022 return NO_EXIT;
3024 #endif
3026 static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
3028 tcg_gen_andi_i64(o->out, o->in2, -256);
3029 return NO_EXIT;
3032 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
3034 o->out = o->in2;
3035 o->g_out = o->g_in2;
3036 o->in2 = NULL;
3037 o->g_in2 = false;
3038 return NO_EXIT;
/* Like op_mov2 (move in2 to out), but additionally set access
   register 1 according to the current address-space control in the
   PSW, as required by the MVCLE/MVCDK-style move insns that use it.
   NOTE(review): exact consumer insns not visible here -- confirm
   against insn-data.def.  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Steal in2 as the output, exactly as op_mov2 does.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* Access register 0 always designates the primary space.  */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
3076 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3078 o->out = o->in1;
3079 o->out2 = o->in2;
3080 o->g_out = o->g_in1;
3081 o->g_out2 = o->g_in2;
3082 o->in1 = NULL;
3083 o->in2 = NULL;
3084 o->g_in1 = o->g_in2 = false;
3085 return NO_EXIT;
3088 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3090 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3091 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3092 tcg_temp_free_i32(l);
3093 return NO_EXIT;
3096 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3098 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3099 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3100 tcg_temp_free_i32(l);
3101 return NO_EXIT;
/* MOVE LONG: helper-based byte move with lengths in register pairs.
   Both r1 and r2 designate even/odd pairs and must be even.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}
3125 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3127 int r1 = get_field(s->fields, r1);
3128 int r3 = get_field(s->fields, r3);
3129 TCGv_i32 t1, t3;
3131 /* r1 and r3 must be even. */
3132 if (r1 & 1 || r3 & 1) {
3133 gen_program_exception(s, PGM_SPECIFICATION);
3134 return EXIT_NORETURN;
3137 t1 = tcg_const_i32(r1);
3138 t3 = tcg_const_i32(r3);
3139 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3140 tcg_temp_free_i32(t1);
3141 tcg_temp_free_i32(t3);
3142 set_cc_static(s);
3143 return NO_EXIT;
3146 static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3148 int r1 = get_field(s->fields, r1);
3149 int r3 = get_field(s->fields, r3);
3150 TCGv_i32 t1, t3;
3152 /* r1 and r3 must be even. */
3153 if (r1 & 1 || r3 & 1) {
3154 gen_program_exception(s, PGM_SPECIFICATION);
3155 return EXIT_NORETURN;
3158 t1 = tcg_const_i32(r1);
3159 t3 = tcg_const_i32(r3);
3160 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3161 tcg_temp_free_i32(t1);
3162 tcg_temp_free_i32(t3);
3163 set_cc_static(s);
3164 return NO_EXIT;
3167 static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
3169 int r3 = get_field(s->fields, r3);
3170 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3171 set_cc_static(s);
3172 return NO_EXIT;
3175 #ifndef CONFIG_USER_ONLY
3176 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3178 int r1 = get_field(s->fields, l1);
3179 check_privileged(s);
3180 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3181 set_cc_static(s);
3182 return NO_EXIT;
3185 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3187 int r1 = get_field(s->fields, l1);
3188 check_privileged(s);
3189 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3190 set_cc_static(s);
3191 return NO_EXIT;
3193 #endif
3195 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3197 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3198 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3199 tcg_temp_free_i32(l);
3200 return NO_EXIT;
3203 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3205 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3206 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3207 tcg_temp_free_i32(l);
3208 return NO_EXIT;
3211 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3213 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3214 set_cc_static(s);
3215 return NO_EXIT;
/* MOVE STRING: copy bytes up to the terminator in regs[0]; the helper
   returns the updated first-operand address, and the updated second
   operand comes back through the low-128 return slot.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3226 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3228 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3229 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3230 tcg_temp_free_i32(l);
3231 return NO_EXIT;
3234 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3236 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3237 return NO_EXIT;
3240 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3242 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3243 return NO_EXIT;
3246 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3248 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3249 return NO_EXIT;
3252 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3254 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3255 return NO_EXIT;
3258 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3260 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3261 return NO_EXIT;
3264 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3266 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3267 return_low128(o->out2);
3268 return NO_EXIT;
3271 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3273 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3274 return_low128(o->out2);
3275 return NO_EXIT;
3278 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3280 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3281 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3282 tcg_temp_free_i64(r3);
3283 return NO_EXIT;
3286 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3288 int r3 = get_field(s->fields, r3);
3289 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3290 return NO_EXIT;
3293 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3295 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3296 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3297 tcg_temp_free_i64(r3);
3298 return NO_EXIT;
3301 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3303 int r3 = get_field(s->fields, r3);
3304 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3305 return NO_EXIT;
3308 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3310 TCGv_i64 z, n;
3311 z = tcg_const_i64(0);
3312 n = tcg_temp_new_i64();
3313 tcg_gen_neg_i64(n, o->in2);
3314 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3315 tcg_temp_free_i64(n);
3316 tcg_temp_free_i64(z);
3317 return NO_EXIT;
3320 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3322 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3323 return NO_EXIT;
3326 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3328 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3329 return NO_EXIT;
3332 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3334 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3335 tcg_gen_mov_i64(o->out2, o->in2);
3336 return NO_EXIT;
3339 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3341 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3342 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3343 tcg_temp_free_i32(l);
3344 set_cc_static(s);
3345 return NO_EXIT;
3348 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3350 tcg_gen_neg_i64(o->out, o->in2);
3351 return NO_EXIT;
3354 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3356 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3357 return NO_EXIT;
3360 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3362 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3363 return NO_EXIT;
3366 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3368 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3369 tcg_gen_mov_i64(o->out2, o->in2);
3370 return NO_EXIT;
3373 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3375 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3376 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3377 tcg_temp_free_i32(l);
3378 set_cc_static(s);
3379 return NO_EXIT;
3382 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3384 tcg_gen_or_i64(o->out, o->in1, o->in2);
3385 return NO_EXIT;
/* OR IMMEDIATE into a sub-field of a register (OILL/OIHH etc.):
   insn->data encodes the field's shift (low byte) and bit size.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a local temp -- we shift it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* OR into memory (OI and friends).  Without the interlocked-access-2
   facility this is a plain load/modify/store; with it, the OR is done
   atomically in memory.  insn->data carries the TCGMemOp size.  */
static ExitStatus op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}
3425 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3427 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3428 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3429 tcg_temp_free_i32(l);
3430 return NO_EXIT;
/* PACK ASCII: pack an ASCII decimal string of l2+1 bytes into the
   first operand; lengths above 32 bytes are a specification error.  */
static ExitStatus op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* PACK UNICODE: pack a UTF-16 decimal string of l2+1 bytes; the
   length must be even and at most 64 bytes.  */
static ExitStatus op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3465 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3467 gen_helper_popcnt(o->out, o->in2);
3468 return NO_EXIT;
3471 #ifndef CONFIG_USER_ONLY
3472 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3474 check_privileged(s);
3475 gen_helper_ptlb(cpu_env);
3476 return NO_EXIT;
3478 #endif
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG):
   rotate r2 left by i5 and insert the bit range i3..i4 into r1,
   optionally zeroing the untouched bits (i4 bit 0x80).  The common
   case is lowered to a single extract or deposit where possible.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1. In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        /* risbhg inserts into the high word.  */
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask/merge by hand.  */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate
   r2 left by i5, combine the bit range i3..i4 into r1 with the logic
   op selected by op2, and set CC from the affected bits.  i3 bit 0x80
   selects the test-only form (result discarded).  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        /* Force the unselected bits to 1 so they pass through AND.  */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3623 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3625 tcg_gen_bswap16_i64(o->out, o->in2);
3626 return NO_EXIT;
3629 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3631 tcg_gen_bswap32_i64(o->out, o->in2);
3632 return NO_EXIT;
3635 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3637 tcg_gen_bswap64_i64(o->out, o->in2);
3638 return NO_EXIT;
3641 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3643 TCGv_i32 t1 = tcg_temp_new_i32();
3644 TCGv_i32 t2 = tcg_temp_new_i32();
3645 TCGv_i32 to = tcg_temp_new_i32();
3646 tcg_gen_extrl_i64_i32(t1, o->in1);
3647 tcg_gen_extrl_i64_i32(t2, o->in2);
3648 tcg_gen_rotl_i32(to, t1, t2);
3649 tcg_gen_extu_i32_i64(o->out, to);
3650 tcg_temp_free_i32(t1);
3651 tcg_temp_free_i32(t2);
3652 tcg_temp_free_i32(to);
3653 return NO_EXIT;
3656 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3658 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3659 return NO_EXIT;
3662 #ifndef CONFIG_USER_ONLY
3663 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3665 check_privileged(s);
3666 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3667 set_cc_static(s);
3668 return NO_EXIT;
3671 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3673 check_privileged(s);
3674 gen_helper_sacf(cpu_env, o->in2);
3675 /* Addressing mode has changed, so end the block. */
3676 return EXIT_PC_STALE;
3678 #endif
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): update the addressing-mode
   bits in the PSW mask.  insn->data selects the mode (0=24, 1=31,
   other=64).  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed. Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    /* Deposit the new addressing-mode bits into the PSW mask.  */
    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
/* SAR: store the low 32 bits of in2 into access register r1. */
3715 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3717     int r1 = get_field(s->fields, r1);
3718     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3719     return NO_EXIT;
/* BFP subtract / square-root translators: thin wrappers around the
   softfloat helpers.  The 128-bit variants return the low half through
   return_low128(). */
/* SEB: 32-bit BFP subtract. */
3722 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3724     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3725     return NO_EXIT;
/* SDB: 64-bit BFP subtract. */
3728 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3730     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3731     return NO_EXIT;
/* SXB: 128-bit BFP subtract. */
3734 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3736     gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3737     return_low128(o->out2);
3738     return NO_EXIT;
/* SQEB: 32-bit BFP square root. */
3741 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3743     gen_helper_sqeb(o->out, cpu_env, o->in2);
3744     return NO_EXIT;
/* SQDB: 64-bit BFP square root. */
3747 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3749     gen_helper_sqdb(o->out, cpu_env, o->in2);
3750     return NO_EXIT;
/* SQXB: 128-bit BFP square root. */
3753 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3755     gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3756     return_low128(o->out2);
3757     return NO_EXIT;
3760 #ifndef CONFIG_USER_ONLY
/* SERVC (service call / SCLP): privileged; helper sets the CC. */
3761 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3763     check_privileged(s);
3764     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3765     set_cc_static(s);
3766     return NO_EXIT;
/* SIGP: privileged signal-processor; order and parameters go to the
   helper, which sets the CC. */
3769 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3771     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3772     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3773     check_privileged(s);
3774     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3775     set_cc_static(s);
3776     tcg_temp_free_i32(r1);
3777     tcg_temp_free_i32(r3);
3778     return NO_EXIT;
3780 #endif
/* STOC/STOCG/STOCFH: store on condition.  The condition from m3 is
   inverted and used to branch around the store; insn->data selects the
   width/source (0 = low 32 bits, 1 = 64 bits, 2 = high 32 bits). */
3782 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3784     DisasCompare c;
3785     TCGv_i64 a, h;
3786     TCGLabel *lab;
3787     int r1;
3789     disas_jcc(s, &c, get_field(s->fields, m3));
3791     /* We want to store when the condition is fulfilled, so branch
3792        out when it's not */
3793     c.cond = tcg_invert_cond(c.cond);
3795     lab = gen_new_label();
3796     if (c.is_64) {
3797         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3798     } else {
3799         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3801     free_compare(&c);
/* Compute the effective address only on the store path. */
3803     r1 = get_field(s->fields, r1);
3804     a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3805     switch (s->insn->data) {
3806     case 1: /* STOCG */
3807         tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3808         break;
3809     case 0: /* STOC */
3810         tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3811         break;
3812     case 2: /* STOCFH */
3813         h = tcg_temp_new_i64();
3814         tcg_gen_shri_i64(h, regs[r1], 32);
3815         tcg_gen_qemu_st32(h, a, get_mem_index(s));
3816         tcg_temp_free_i64(h);
3817         break;
3818     default:
3819         g_assert_not_reached();
3821     tcg_temp_free_i64(a);
3823     gen_set_label(lab);
3824     return NO_EXIT;
/* SLA/SLAG: arithmetic shift left.  insn->data is the sign-bit position
   (31 or 63), selecting the 32- vs 64-bit CC computation. */
3827 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3829     uint64_t sign = 1ull << s->insn->data;
3830     enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3831     gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3832     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3833     /* The arithmetic left shift is curious in that it does not affect
3834        the sign bit.  Copy that over from the source unchanged.  */
3835     tcg_gen_andi_i64(o->out, o->out, ~sign);
3836     tcg_gen_andi_i64(o->in1, o->in1, sign);
3837     tcg_gen_or_i64(o->out, o->out, o->in1);
3838     return NO_EXIT;
/* SLL: logical shift left. */
3841 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3843     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3844     return NO_EXIT;
/* SRA: arithmetic shift right. */
3847 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3849     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3850     return NO_EXIT;
/* SRL: logical shift right. */
3853 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3855     tcg_gen_shr_i64(o->out, o->in1, o->in2);
3856     return NO_EXIT;
/* SFPC: set the floating-point control register via helper. */
3859 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3861     gen_helper_sfpc(cpu_env, o->in2);
3862     return NO_EXIT;
/* SFASR: set FPC and signal; helper may raise a simulated exception. */
3865 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3867     gen_helper_sfas(cpu_env, o->in2);
3868     return NO_EXIT;
/* SRNM/SRNMB/SRNMT: set the rounding-mode field of the FPC.  The opcode
   selects which bit field of the FPC is written (pos/len), then the new
   FPC is installed through the SFPC helper so that fpu_status picks up
   the new rounding mode. */
3871 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3873     int b2 = get_field(s->fields, b2);
3874     int d2 = get_field(s->fields, d2);
3875     TCGv_i64 t1 = tcg_temp_new_i64();
3876     TCGv_i64 t2 = tcg_temp_new_i64();
3877     int mask, pos, len;
3879     switch (s->fields->op2) {
3880     case 0x99: /* SRNM */
3881         pos = 0, len = 2;
3882         break;
3883     case 0xb8: /* SRNMB */
3884         pos = 0, len = 3;
3885         break;
3886     case 0xb9: /* SRNMT */
3887         pos = 4, len = 3;
3888         break;
3889     default:
3890         tcg_abort();
3892     mask = (1 << len) - 1;
3894     /* Insert the value into the appropriate field of the FPC.  */
3895     if (b2 == 0) {
/* No base register: the displacement itself is the new mode value. */
3896         tcg_gen_movi_i64(t1, d2 & mask);
3897     } else {
3898         tcg_gen_addi_i64(t1, regs[b2], d2);
3899         tcg_gen_andi_i64(t1, t1, mask);
3901     tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3902     tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3903     tcg_temp_free_i64(t1);
3905     /* Then install the new FPC to set the rounding mode in fpu_status.  */
3906     gen_helper_sfpc(cpu_env, t2);
3907     tcg_temp_free_i64(t2);
3908     return NO_EXIT;
/* SPM: set program mask.  Bits 28-29 of in1 become the CC; bits 24-27
   are deposited into the PSW program-mask field. */
3911 static ExitStatus op_spm(DisasContext *s, DisasOps *o)
3913     tcg_gen_extrl_i64_i32(cc_op, o->in1);
3914     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
3915     set_cc_static(s);
3917     tcg_gen_shri_i64(o->in1, o->in1, 24);
3918     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
3919     return NO_EXIT;
/* ECTG: extract CPU time.  Loads the third operand into r3, stores
   (first operand - CPU timer) into GR0 and the second operand address
   into GR1.  All operand addresses are fetched before anything is
   modified so a fault cannot leave partial state. */
3922 static ExitStatus op_ectg(DisasContext *s, DisasOps *o)
3924     int b1 = get_field(s->fields, b1);
3925     int d1 = get_field(s->fields, d1);
3926     int b2 = get_field(s->fields, b2);
3927     int d2 = get_field(s->fields, d2);
3928     int r3 = get_field(s->fields, r3);
3929     TCGv_i64 tmp = tcg_temp_new_i64();
3931     /* fetch all operands first */
3932     o->in1 = tcg_temp_new_i64();
3933     tcg_gen_addi_i64(o->in1, regs[b1], d1);
3934     o->in2 = tcg_temp_new_i64();
3935     tcg_gen_addi_i64(o->in2, regs[b2], d2);
3936     o->addr1 = get_address(s, 0, r3, 0);
3938     /* load the third operand into r3 before modifying anything */
3939     tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
3941     /* subtract CPU timer from first operand and store in GR0 */
3942     gen_helper_stpt(tmp, cpu_env);
3943     tcg_gen_sub_i64(regs[0], o->in1, tmp);
3945     /* store second operand in GR1 */
3946     tcg_gen_mov_i64(regs[1], o->in2);
3948     tcg_temp_free_i64(tmp);
3949     return NO_EXIT;
3952 #ifndef CONFIG_USER_ONLY
/* SPKA: privileged; deposit bits 4-7 of in2 as the PSW access key. */
3953 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3955     check_privileged(s);
3956     tcg_gen_shri_i64(o->in2, o->in2, 4);
3957     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
3958     return NO_EXIT;
/* SSKE: privileged; set storage key extended via helper. */
3961 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3963     check_privileged(s);
3964     gen_helper_sske(cpu_env, o->in1, o->in2);
3965     return NO_EXIT;
/* SSM: privileged; replace the top 8 bits of the PSW mask. */
3968 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3970     check_privileged(s);
3971     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3972     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3973     return EXIT_PC_STALE_NOCHAIN;
/* STAP: privileged; store the CPU address (core_id). */
3976 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3978     check_privileged(s);
3979     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
3980     return NO_EXIT;
/* STCK: store TOD clock; CC is forced to 0 (clock always "set"). */
3983 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3985     gen_helper_stck(o->out, cpu_env);
3986     /* ??? We don't implement clock states.  */
3987     gen_op_movi_cc(s, 0);
3988     return NO_EXIT;
/* STCKE: store TOD clock extended — a 16-byte value built from the
   64-bit clock, a forced-nonzero bit, and the TOD programmable field. */
3991 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3993     TCGv_i64 c1 = tcg_temp_new_i64();
3994     TCGv_i64 c2 = tcg_temp_new_i64();
3995     TCGv_i64 todpr = tcg_temp_new_i64();
3996     gen_helper_stck(c1, cpu_env);
3997     /* 16 bit value store in an uint32_t (only valid bits set) */
3998     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
3999     /* Shift the 64-bit value into its place as a zero-extended
4000        104-bit value.  Note that "bit positions 64-103 are always
4001        non-zero so that they compare differently to STCK"; we set
4002        the least significant bit to 1.  */
4003     tcg_gen_shli_i64(c2, c1, 56);
4004     tcg_gen_shri_i64(c1, c1, 8);
4005     tcg_gen_ori_i64(c2, c2, 0x10000);
4006     tcg_gen_or_i64(c2, c2, todpr);
4007     tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4008     tcg_gen_addi_i64(o->in2, o->in2, 8);
4009     tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4010     tcg_temp_free_i64(c1);
4011     tcg_temp_free_i64(c2);
4012     tcg_temp_free_i64(todpr);
4013     /* ??? We don't implement clock states.  */
4014     gen_op_movi_cc(s, 0);
4015     return NO_EXIT;
/* SCKC: privileged; set clock comparator. */
4018 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
4020     check_privileged(s);
4021     gen_helper_sckc(cpu_env, o->in2);
4022     return NO_EXIT;
/* SCKPF: privileged; set TOD programmable field from GR0. */
4025 static ExitStatus op_sckpf(DisasContext *s, DisasOps *o)
4027     check_privileged(s);
4028     gen_helper_sckpf(cpu_env, regs[0]);
4029     return NO_EXIT;
/* STCKC: privileged; store clock comparator. */
4032 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
4034     check_privileged(s);
4035     gen_helper_stckc(o->out, cpu_env);
4036     return NO_EXIT;
/* STCTG: privileged; store control registers r1..r3 (64-bit). */
4039 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
4041     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4042     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4043     check_privileged(s);
4044     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4045     tcg_temp_free_i32(r1);
4046     tcg_temp_free_i32(r3);
4047     return NO_EXIT;
/* STCTL: privileged; store control registers r1..r3 (32-bit). */
4050 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
4052     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4053     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4054     check_privileged(s);
4055     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4056     tcg_temp_free_i32(r1);
4057     tcg_temp_free_i32(r3);
4058     return NO_EXIT;
/* STIDP: privileged; store the CPU id. */
4061 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
4063     check_privileged(s);
4064     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4065     return NO_EXIT;
/* SPT: privileged; set CPU timer. */
4068 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
4070     check_privileged(s);
4071     gen_helper_spt(cpu_env, o->in2);
4072     return NO_EXIT;
/* STFL: privileged; store facility list at its fixed low-core location. */
4075 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
4077     check_privileged(s);
4078     gen_helper_stfl(cpu_env);
4079     return NO_EXIT;
/* STPT: privileged; store CPU timer. */
4082 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
4084     check_privileged(s);
4085     gen_helper_stpt(o->out, cpu_env);
4086     return NO_EXIT;
/* STSI: privileged; store system information, CC from helper. */
4089 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
4091     check_privileged(s);
4092     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4093     set_cc_static(s);
4094     return NO_EXIT;
/* SPX: privileged; set prefix register. */
4097 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
4099     check_privileged(s);
4100     gen_helper_spx(cpu_env, o->in2);
4101     return NO_EXIT;
/* Channel-subsystem I/O instructions.  All are privileged; most take the
   subchannel id in GR1 and get their CC from the helper. */
/* XSCH: cancel subchannel. */
4104 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
4106     check_privileged(s);
4107     gen_helper_xsch(cpu_env, regs[1]);
4108     set_cc_static(s);
4109     return NO_EXIT;
/* CSCH: clear subchannel. */
4112 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
4114     check_privileged(s);
4115     gen_helper_csch(cpu_env, regs[1]);
4116     set_cc_static(s);
4117     return NO_EXIT;
/* HSCH: halt subchannel. */
4120 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
4122     check_privileged(s);
4123     gen_helper_hsch(cpu_env, regs[1]);
4124     set_cc_static(s);
4125     return NO_EXIT;
/* MSCH: modify subchannel from the SCHIB at in2. */
4128 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
4130     check_privileged(s);
4131     gen_helper_msch(cpu_env, regs[1], o->in2);
4132     set_cc_static(s);
4133     return NO_EXIT;
/* RCHP: reset channel path. */
4136 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
4138     check_privileged(s);
4139     gen_helper_rchp(cpu_env, regs[1]);
4140     set_cc_static(s);
4141     return NO_EXIT;
/* RSCH: resume subchannel. */
4144 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
4146     check_privileged(s);
4147     gen_helper_rsch(cpu_env, regs[1]);
4148     set_cc_static(s);
4149     return NO_EXIT;
/* SAL: set address limit; no CC change. */
4152 static ExitStatus op_sal(DisasContext *s, DisasOps *o)
4154     check_privileged(s);
4155     gen_helper_sal(cpu_env, regs[1]);
4156     return NO_EXIT;
/* SCHM: set channel monitor; no CC change. */
4159 static ExitStatus op_schm(DisasContext *s, DisasOps *o)
4161     check_privileged(s);
4162     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4163     return NO_EXIT;
/* SIGA: not implemented; report "not operational". */
4166 static ExitStatus op_siga(DisasContext *s, DisasOps *o)
4168     check_privileged(s);
4169     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4170     gen_op_movi_cc(s, 3);
4171     return NO_EXIT;
/* STCPS: not provided; architecture allows suppression. */
4174 static ExitStatus op_stcps(DisasContext *s, DisasOps *o)
4176     check_privileged(s);
4177     /* The instruction is suppressed if not provided.  */
4178     return NO_EXIT;
/* SSCH: start subchannel with the ORB at in2. */
4181 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
4183     check_privileged(s);
4184     gen_helper_ssch(cpu_env, regs[1], o->in2);
4185     set_cc_static(s);
4186     return NO_EXIT;
/* STSCH: store subchannel information at in2. */
4189 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
4191     check_privileged(s);
4192     gen_helper_stsch(cpu_env, regs[1], o->in2);
4193     set_cc_static(s);
4194     return NO_EXIT;
/* STCRW: store channel report word at in2. */
4197 static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
4199     check_privileged(s);
4200     gen_helper_stcrw(cpu_env, o->in2);
4201     set_cc_static(s);
4202     return NO_EXIT;
/* TPI: privileged; test pending interruption, CC from helper. */
4205 static ExitStatus op_tpi(DisasContext *s, DisasOps *o)
4207     check_privileged(s);
4208     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4209     set_cc_static(s);
4210     return NO_EXIT;
/* TSCH: privileged; test subchannel, storing the IRB at in2. */
4213 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
4215     check_privileged(s);
4216     gen_helper_tsch(cpu_env, regs[1], o->in2);
4217     set_cc_static(s);
4218     return NO_EXIT;
/* CHSC: privileged; channel subsystem call on the block at in2. */
4221 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
4223     check_privileged(s);
4224     gen_helper_chsc(cpu_env, o->in2);
4225     set_cc_static(s);
4226     return NO_EXIT;
/* STPX: privileged; store the prefix register (masked to valid bits). */
4229 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
4231     check_privileged(s);
4232     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4233     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4234     return NO_EXIT;
/* STNSM/STOSM: store the current system-mask byte, then AND (op 0xac)
   or OR the immediate into the top byte of the PSW mask. */
4237 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
4239     uint64_t i2 = get_field(s->fields, i2);
4240     TCGv_i64 t;
4242     check_privileged(s);
4244     /* It is important to do what the instruction name says: STORE THEN.
4245        If we let the output hook perform the store then if we fault and
4246        restart, we'll have the wrong SYSTEM MASK in place.  */
4247     t = tcg_temp_new_i64();
4248     tcg_gen_shri_i64(t, psw_mask, 56);
4249     tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4250     tcg_temp_free_i64(t);
4252     if (s->fields->op == 0xac) {
4253         tcg_gen_andi_i64(psw_mask, psw_mask,
4254                          (i2 << 56) | 0x00ffffffffffffffull);
4255     } else {
4256         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4259     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4260     return EXIT_PC_STALE_NOCHAIN;
/* STURA: privileged; store 32 bits using real address. */
4263 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
4265     check_privileged(s);
4266     gen_helper_stura(cpu_env, o->in2, o->in1);
4267     return NO_EXIT;
/* STURG: privileged; store 64 bits using real address. */
4270 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
4272     check_privileged(s);
4273     gen_helper_sturg(cpu_env, o->in2, o->in1);
4274     return NO_EXIT;
4276 #endif
/* STFLE: store facility list extended; CC from helper. */
4278 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4280     gen_helper_stfle(cc_op, cpu_env, o->in2);
4281     set_cc_static(s);
4282     return NO_EXIT;
/* Plain stores of in1 to the address in in2, by width. */
4285 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4287     tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4288     return NO_EXIT;
4291 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4293     tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4294     return NO_EXIT;
4297 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4299     tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4300     return NO_EXIT;
4303 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4305     tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4306     return NO_EXIT;
/* STAM: store access registers r1..r3 at in2 via helper. */
4309 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4311     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4312     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4313     gen_helper_stam(cpu_env, r1, o->in2, r3);
4314     tcg_temp_free_i32(r1);
4315     tcg_temp_free_i32(r3);
4316     return NO_EXIT;
/* STCM/STCMH/STCMY: store characters under mask.  Contiguous masks are
   turned into single 8/16/32-bit stores; sparse masks fall back to a
   byte-by-byte sequence.  insn->data gives the bit base (0 or 32) for
   selecting the low or high word of r1. */
4319 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4321     int m3 = get_field(s->fields, m3);
4322     int pos, base = s->insn->data;
4323     TCGv_i64 tmp = tcg_temp_new_i64();
4325     pos = base + ctz32(m3) * 8;
4326     switch (m3) {
4327     case 0xf:
4328         /* Effectively a 32-bit store.  */
4329         tcg_gen_shri_i64(tmp, o->in1, pos);
4330         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4331         break;
4333     case 0xc:
4334     case 0x6:
4335     case 0x3:
4336         /* Effectively a 16-bit store.  */
4337         tcg_gen_shri_i64(tmp, o->in1, pos);
4338         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4339         break;
4341     case 0x8:
4342     case 0x4:
4343     case 0x2:
4344     case 0x1:
4345         /* Effectively an 8-bit store.  */
4346         tcg_gen_shri_i64(tmp, o->in1, pos);
4347         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4348         break;
4350     default:
4351         /* This is going to be a sequence of shifts and stores.  */
4352         pos = base + 32 - 8;
4353         while (m3) {
4354             if (m3 & 0x8) {
4355                 tcg_gen_shri_i64(tmp, o->in1, pos);
4356                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4357                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4359             m3 = (m3 << 1) & 0xf;
4360             pos -= 8;
4362         break;
4364     tcg_temp_free_i64(tmp);
4365     return NO_EXIT;
/* STM/STMG: store multiple registers r1..r3 (wrapping past 15) at
   consecutive addresses; insn->data is the element size (4 or 8). */
4368 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4370     int r1 = get_field(s->fields, r1);
4371     int r3 = get_field(s->fields, r3);
4372     int size = s->insn->data;
4373     TCGv_i64 tsize = tcg_const_i64(size);
4375     while (1) {
4376         if (size == 8) {
4377             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4378         } else {
4379             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4381         if (r1 == r3) {
4382             break;
4384         tcg_gen_add_i64(o->in2, o->in2, tsize);
4385         r1 = (r1 + 1) & 15;
4388     tcg_temp_free_i64(tsize);
4389     return NO_EXIT;
/* STMH: store multiple high — for each register r1..r3 store the upper
   32 bits (obtained by shifting left 32, then a 32-bit store of the
   high part) at consecutive word addresses. */
4392 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4394     int r1 = get_field(s->fields, r1);
4395     int r3 = get_field(s->fields, r3);
4396     TCGv_i64 t = tcg_temp_new_i64();
4397     TCGv_i64 t4 = tcg_const_i64(4);
4398     TCGv_i64 t32 = tcg_const_i64(32);
4400     while (1) {
4401         tcg_gen_shl_i64(t, regs[r1], t32);
4402         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4403         if (r1 == r3) {
4404             break;
4406         tcg_gen_add_i64(o->in2, o->in2, t4);
4407         r1 = (r1 + 1) & 15;
4410     tcg_temp_free_i64(t);
4411     tcg_temp_free_i64(t4);
4412     tcg_temp_free_i64(t32);
4413     return NO_EXIT;
/* STPQ: store pair to quadword; uses the parallel (atomic) helper when
   translating for a parallel context. */
4416 static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
4418     if (tb_cflags(s->tb) & CF_PARALLEL) {
4419         gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4420     } else {
4421         gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4423     return NO_EXIT;
/* SRST: search string; helper updates r1/r2 and the CC. */
4426 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4428     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4429     TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4431     gen_helper_srst(cpu_env, r1, r2);
4433     tcg_temp_free_i32(r1);
4434     tcg_temp_free_i32(r2);
4435     set_cc_static(s);
4436     return NO_EXIT;
/* SRSTU: search string unicode; same shape as SRST. */
4439 static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
4441     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4442     TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4444     gen_helper_srstu(cpu_env, r1, r2);
4446     tcg_temp_free_i32(r1);
4447     tcg_temp_free_i32(r2);
4448     set_cc_static(s);
4449     return NO_EXIT;
/* Plain subtract: out = in1 - in2. */
4452 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4454     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4455     return NO_EXIT;
/* SLB/SLBG: subtract with borrow.  The borrow is recovered from the
   current CC (borrow = CC 0 or 1 inverted) and subtracted as well. */
4458 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4460     DisasCompare cmp;
4461     TCGv_i64 borrow;
4463     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4465     /* The !borrow flag is the msb of CC.  Since we want the inverse of
4466        that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
4467     disas_jcc(s, &cmp, 8 | 4);
4468     borrow = tcg_temp_new_i64();
4469     if (cmp.is_64) {
4470         tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4471     } else {
4472         TCGv_i32 t = tcg_temp_new_i32();
4473         tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4474         tcg_gen_extu_i32_i64(borrow, t);
4475         tcg_temp_free_i32(t);
4477     free_compare(&cmp);
4479     tcg_gen_sub_i64(o->out, o->out, borrow);
4480     tcg_temp_free_i64(borrow);
4481     return NO_EXIT;
/* SVC: supervisor call.  Records the SVC code and instruction length in
   the CPU state, then raises EXCP_SVC; the TB ends here. */
4484 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4486     TCGv_i32 t;
4488     update_psw_addr(s);
4489     update_cc_op(s);
4491     t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4492     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4493     tcg_temp_free_i32(t);
4495     t = tcg_const_i32(s->ilen);
4496     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4497     tcg_temp_free_i32(t);
4499     gen_exception(EXCP_SVC);
4500     return EXIT_NORETURN;
/* TAM: test addressing mode; CC encodes the 64/31-bit mode flags. */
4503 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4505     int cc = 0;
4507     cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4508     cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4509     gen_op_movi_cc(s, cc);
4510     return NO_EXIT;
/* TCEB/TCDB/TCXB: test data class; CC from the softfloat helper. */
4513 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4515     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4516     set_cc_static(s);
4517     return NO_EXIT;
4520 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4522     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4523     set_cc_static(s);
4524     return NO_EXIT;
4527 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4529     gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4530     set_cc_static(s);
4531     return NO_EXIT;
4534 #ifndef CONFIG_USER_ONLY
/* TB (test block): privileged; CC from helper. */
4536 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4538     check_privileged(s);
4539     gen_helper_testblock(cc_op, cpu_env, o->in2);
4540     set_cc_static(s);
4541     return NO_EXIT;
/* TPROT: test protection; CC from helper. */
4544 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4546     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4547     set_cc_static(s);
4548     return NO_EXIT;
4551 #endif
/* TP: test decimal; operand length is l1 + 1; CC from helper. */
4553 static ExitStatus op_tp(DisasContext *s, DisasOps *o)
4555     TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4556     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4557     tcg_temp_free_i32(l1);
4558     set_cc_static(s);
4559     return NO_EXIT;
/* TR: translate the l1-length field at addr1 using the table at in2. */
4562 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4564     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4565     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4566     tcg_temp_free_i32(l);
4567     set_cc_static(s);
4568     return NO_EXIT;
/* TRE: translate extended; 128-bit result via return_low128. */
4571 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4573     gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4574     return_low128(o->out2);
4575     set_cc_static(s);
4576     return NO_EXIT;
/* TRT: translate and test; CC from helper. */
4579 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4581     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4582     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4583     tcg_temp_free_i32(l);
4584     set_cc_static(s);
4585     return NO_EXIT;
/* TRTR: translate and test reverse; CC from helper. */
4588 static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
4590     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4591     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4592     tcg_temp_free_i32(l);
4593     set_cc_static(s);
4594     return NO_EXIT;
/* TROO/TROT/TRTO/TRTT family: the low two opcode bits select the
   source/destination element sizes ("sizes").  The test character in
   "tst" comes from GR0 unless the ETF2-ENH m3 bit disables the test
   (tst = -1); without the ETF2 enhancement, m3 is ignored entirely. */
4597 static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
4599     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4600     TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4601     TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4602     TCGv_i32 tst = tcg_temp_new_i32();
4603     int m3 = get_field(s->fields, m3);
4605     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4606         m3 = 0;
4608     if (m3 & 1) {
4609         tcg_gen_movi_i32(tst, -1);
4610     } else {
4611         tcg_gen_extrl_i64_i32(tst, regs[0]);
/* Truncate the test char to the source element width. */
4612         if (s->insn->opc & 3) {
4613             tcg_gen_ext8u_i32(tst, tst);
4614         } else {
4615             tcg_gen_ext16u_i32(tst, tst);
4618     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4620     tcg_temp_free_i32(r1);
4621     tcg_temp_free_i32(r2);
4622     tcg_temp_free_i32(sizes);
4623     tcg_temp_free_i32(tst);
4624     set_cc_static(s);
4625     return NO_EXIT;
/* TS: test and set.  Atomically exchange the byte at in2 with 0xff;
   the CC is the old byte's leftmost bit. */
4628 static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4630     TCGv_i32 t1 = tcg_const_i32(0xff);
4631     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4632     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4633     tcg_temp_free_i32(t1);
4634     set_cc_static(s);
4635     return NO_EXIT;
/* UNPK: unpack decimal via helper; no CC change here. */
4638 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4640     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4641     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4642     tcg_temp_free_i32(l);
4643     return NO_EXIT;
/* UNPKA: unpack ASCII.  A length beyond 32 bytes is a specification
   exception checked at translation time; CC from helper. */
4646 static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
4648     int l1 = get_field(s->fields, l1) + 1;
4649     TCGv_i32 l;
4651     /* The length must not exceed 32 bytes.  */
4652     if (l1 > 32) {
4653         gen_program_exception(s, PGM_SPECIFICATION);
4654         return EXIT_NORETURN;
4656     l = tcg_const_i32(l1);
4657     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4658     tcg_temp_free_i32(l);
4659     set_cc_static(s);
4660     return NO_EXIT;
/* UNPKU: unpack unicode.  Length must be even and at most 64 bytes,
   checked at translation time; CC from helper. */
4663 static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
4665     int l1 = get_field(s->fields, l1) + 1;
4666     TCGv_i32 l;
4668     /* The length must be even and should not exceed 64 bytes.  */
4669     if ((l1 & 1) || (l1 > 64)) {
4670         gen_program_exception(s, PGM_SPECIFICATION);
4671         return EXIT_NORETURN;
4673     l = tcg_const_i32(l1);
4674     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4675     tcg_temp_free_i32(l);
4676     set_cc_static(s);
4677     return NO_EXIT;
/* XC: exclusive-or characters.  The special case dst == src (same base
   and displacement) zeroes the destination, which is inlined as a short
   run of stores for lengths up to 32 bytes; everything else goes
   through the helper, which also computes the CC. */
4681 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4683     int d1 = get_field(s->fields, d1);
4684     int d2 = get_field(s->fields, d2);
4685     int b1 = get_field(s->fields, b1);
4686     int b2 = get_field(s->fields, b2);
4687     int l = get_field(s->fields, l1);
4688     TCGv_i32 t32;
4690     o->addr1 = get_address(s, 0, b1, d1);
4692     /* If the addresses are identical, this is a store/memset of zero.  */
4693     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4694         o->in2 = tcg_const_i64(0);
4696         l++;
/* Emit 8/4/2/1-byte zero stores to cover the l bytes. */
4697         while (l >= 8) {
4698             tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4699             l -= 8;
4700             if (l > 0) {
4701                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4704         if (l >= 4) {
4705             tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4706             l -= 4;
4707             if (l > 0) {
4708                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4711         if (l >= 2) {
4712             tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4713             l -= 2;
4714             if (l > 0) {
4715                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4718         if (l) {
4719             tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
/* XOR with itself is always zero, hence CC 0. */
4721         gen_op_movi_cc(s, 0);
4722         return NO_EXIT;
4725     /* But in general we'll defer to a helper.  */
4726     o->in2 = get_address(s, 0, b2, d2);
4727     t32 = tcg_const_i32(l);
4728     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4729     tcg_temp_free_i32(t32);
4730     set_cc_static(s);
4731     return NO_EXIT;
/* X/XR/XG/XGR: bitwise exclusive or. */
4734 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4736     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4737     return NO_EXIT;
/* XIHF/XILF-style immediate XOR: insn->data packs the field size (high
   byte) and shift (low byte); only the masked field feeds the CC. */
4740 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4742     int shift = s->insn->data & 0xff;
4743     int size = s->insn->data >> 8;
4744     uint64_t mask = ((1ull << size) - 1) << shift;
4746     assert(!o->g_in2);
4747     tcg_gen_shli_i64(o->in2, o->in2, shift);
4748     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4750     /* Produce the CC from only the bits manipulated.  */
4751     tcg_gen_andi_i64(cc_dst, o->out, mask);
4752     set_cc_nz_u64(s, cc_dst);
4753     return NO_EXIT;
/* XI: exclusive-or immediate into storage.  With interlocked-access-2
   the update is a single atomic fetch-xor; otherwise it is a separate
   load, xor, store.  The xor is recomputed either way to feed the CC.
   insn->data is the memory-op size/endianness for the access. */
4756 static ExitStatus op_xi(DisasContext *s, DisasOps *o)
4758     o->in1 = tcg_temp_new_i64();
4760     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4761         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4762     } else {
4763         /* Perform the atomic operation in memory. */
4764         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4765                                      s->insn->data);
4768     /* Recompute also for atomic case: needed for setting CC. */
4769     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4771     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4772         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4774     return NO_EXIT;
/* Produce a zero output (used by insns whose result is constant 0). */
4777 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4779     o->out = tcg_const_i64(0);
4780     return NO_EXIT;
/* Produce a zero 128-bit output pair; out2 aliases out and is marked
   global-free so it is not double-freed. */
4783 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4785     o->out = tcg_const_i64(0);
4786     o->out2 = o->out;
4787     o->g_out2 = true;
4788     return NO_EXIT;
4791 #ifndef CONFIG_USER_ONLY
/* zPCI instructions.  All privileged; CC comes from the helpers. */
/* CLP: call logical processor. */
4792 static ExitStatus op_clp(DisasContext *s, DisasOps *o)
4794     TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4796     check_privileged(s);
4797     gen_helper_clp(cpu_env, r2);
4798     tcg_temp_free_i32(r2);
4799     set_cc_static(s);
4800     return NO_EXIT;
/* PCILG: PCI load. */
4803 static ExitStatus op_pcilg(DisasContext *s, DisasOps *o)
4805     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4806     TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4808     check_privileged(s);
4809     gen_helper_pcilg(cpu_env, r1, r2);
4810     tcg_temp_free_i32(r1);
4811     tcg_temp_free_i32(r2);
4812     set_cc_static(s);
4813     return NO_EXIT;
/* PCISTG: PCI store. */
4816 static ExitStatus op_pcistg(DisasContext *s, DisasOps *o)
4818     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4819     TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4821     check_privileged(s);
4822     gen_helper_pcistg(cpu_env, r1, r2);
4823     tcg_temp_free_i32(r1);
4824     tcg_temp_free_i32(r2);
4825     set_cc_static(s);
4826     return NO_EXIT;
/* STPCIFC: store PCI function controls; b2 passed as access register. */
4829 static ExitStatus op_stpcifc(DisasContext *s, DisasOps *o)
4831     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4832     TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4834     check_privileged(s);
4835     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4836     tcg_temp_free_i32(ar);
4837     tcg_temp_free_i32(r1);
4838     set_cc_static(s);
4839     return NO_EXIT;
/* SIC: set interruption controls; no CC change. */
4842 static ExitStatus op_sic(DisasContext *s, DisasOps *o)
4844     check_privileged(s);
4845     gen_helper_sic(cpu_env, o->in1, o->in2);
4846     return NO_EXIT;
/* RPCIT: refresh PCI translations. */
4849 static ExitStatus op_rpcit(DisasContext *s, DisasOps *o)
4851     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4852     TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4854     check_privileged(s);
4855     gen_helper_rpcit(cpu_env, r1, r2);
4856     tcg_temp_free_i32(r1);
4857     tcg_temp_free_i32(r2);
4858     set_cc_static(s);
4859     return NO_EXIT;
/* PCISTB: PCI store block. */
4862 static ExitStatus op_pcistb(DisasContext *s, DisasOps *o)
4864     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4865     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4866     TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4868     check_privileged(s);
4869     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4870     tcg_temp_free_i32(ar);
4871     tcg_temp_free_i32(r1);
4872     tcg_temp_free_i32(r3);
4873     set_cc_static(s);
4874     return NO_EXIT;
/* MPCIFC: modify PCI function controls. */
4877 static ExitStatus op_mpcifc(DisasContext *s, DisasOps *o)
4879     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4880     TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4882     check_privileged(s);
4883     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4884     tcg_temp_free_i32(ar);
4885     tcg_temp_free_i32(r1);
4886     set_cc_static(s);
4887     return NO_EXIT;
4889 #endif
4891 /* ====================================================================== */
4892 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4893    the original inputs), update the various cc data structures in order to
4894    be able to compute the new condition code.  */
/* Each cout_* helper records the cc_op plus whichever of in1/in2/out
   that cc_op's lazy evaluation needs. */
4896 static void cout_abs32(DisasContext *s, DisasOps *o)
4898     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4901 static void cout_abs64(DisasContext *s, DisasOps *o)
4903     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4906 static void cout_adds32(DisasContext *s, DisasOps *o)
4908     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4911 static void cout_adds64(DisasContext *s, DisasOps *o)
4913     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4916 static void cout_addu32(DisasContext *s, DisasOps *o)
4918     gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4921 static void cout_addu64(DisasContext *s, DisasOps *o)
4923     gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4926 static void cout_addc32(DisasContext *s, DisasOps *o)
4928     gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4931 static void cout_addc64(DisasContext *s, DisasOps *o)
4933     gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4936 static void cout_cmps32(DisasContext *s, DisasOps *o)
4938     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4941 static void cout_cmps64(DisasContext *s, DisasOps *o)
4943     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4946 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4948     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4951 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4953     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4956 static void cout_f32(DisasContext *s, DisasOps *o)
4958     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4961 static void cout_f64(DisasContext *s, DisasOps *o)
4963     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4966 static void cout_f128(DisasContext *s, DisasOps *o)
4968     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4971 static void cout_nabs32(DisasContext *s, DisasOps *o)
4973     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4976 static void cout_nabs64(DisasContext *s, DisasOps *o)
4978     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4981 static void cout_neg32(DisasContext *s, DisasOps *o)
4983     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4986 static void cout_neg64(DisasContext *s, DisasOps *o)
4988     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4991 static void cout_nz32(DisasContext *s, DisasOps *o)
/* 32-bit NZ: zero-extend first so only the low word is tested. */
4993     tcg_gen_ext32u_i64(cc_dst, o->out);
4994     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4997 static void cout_nz64(DisasContext *s, DisasOps *o)
4999     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5002 static void cout_s32(DisasContext *s, DisasOps *o)
5004     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5007 static void cout_s64(DisasContext *s, DisasOps *o)
5009     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5012 static void cout_subs32(DisasContext *s, DisasOps *o)
5014     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5017 static void cout_subs64(DisasContext *s, DisasOps *o)
5019 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5022 static void cout_subu32(DisasContext *s, DisasOps *o)
5024 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5027 static void cout_subu64(DisasContext *s, DisasOps *o)
5029 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5032 static void cout_subb32(DisasContext *s, DisasOps *o)
5034 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5037 static void cout_subb64(DisasContext *s, DisasOps *o)
5039 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5042 static void cout_tm32(DisasContext *s, DisasOps *o)
5044 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5047 static void cout_tm64(DisasContext *s, DisasOps *o)
5049 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5052 /* ====================================================================== */
5053 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5054 with the TCG register to which we will write. Used in combination with
5055 the "wout" generators, in some cases we need a new temporary, and in
5056 some cases we can write to a TCG global. */
/* Each prep_* helper selects the TCG value(s) the operation writes to:
   either a fresh temporary ("new") or a TCG global for a register, in
   which case g_out/g_out2 is set so translate_one() won't free it.
   The SPEC_prep_* macros declare the specification-exception checks
   the chosen output requires (e.g. even register pair).  */

static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Even/odd general register pair r1, r1+1.  */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 128-bit float register pair f1, f1+2.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
5103 /* ====================================================================== */
5104 /* The "Write OUTput" generators. These generally perform some non-trivial
5105 copy of data to TCG globals, or to main memory. The trivial cases are
5106 generally handled by having a "prep" generator install the TCG global
5107 as the destination of the operation. */
/* Register-destination writeback helpers.  Suffixes: _8/_16 deposit only
   the low bits into r1, _32/_32h write the low/high word, _P32/_P64 write
   an even/odd register pair, _D32 splits a doubleword across a pair.  */

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Replace only the low 8 bits of r1.  */
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Replace only the low 16 bits of r1.  */
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Split the 64-bit result across the r1 (high) / r1+1 (low) pair.
       Note this clobbers o->out by shifting it; o->out is not reused
       after writeback.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Short (32-bit) float into f1.  */
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
5186 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5188 int f1 = get_field(s->fields, r1);
5189 store_freg(f1, o->out);
5190 store_freg(f1 + 2, o->out2);
5192 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditional and memory-destination writeback helpers.  The cond_*
   variants skip the store when r1 == r2 (the value is already there).
   The m1_* variants store to the address computed into o->addr1; the
   *a variants additionally enforce natural alignment (softmmu only).  */

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Alignment-checked 16-bit store.  */
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Alignment-checked 32-bit store.  */
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Alignment-checked 64-bit store.  */
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store to the second-operand address (held in o->in2).  */
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Writeback of the (possibly modified) in2 operand to r1.  */
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5276 /* ====================================================================== */
5277 /* The "INput 1" generators. These load the first operand to an insn. */
/* First-operand loaders.  Suffix conventions: _o = alias the TCG global
   directly (g_in1 set so it is not freed), _32s/_32u = sign/zero-extend
   the low word, _sr32 = high word shifted down, r1p1 = the odd register
   of an even/odd pair, _D32 = assemble a doubleword from a 32-bit pair,
   la* = compute an effective address into o->addr1, m1_* = load from
   the first-operand storage address.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* r1 holds the high word, r1+1 the low word.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* NOTE(review): this "in1" helper fills o->out/o->out2 rather than
       o->in1 -- apparently so the 128-bit f1 pair serves as both source
       and in-place destination; confirm against the x1 users in
       insn-data.def before changing.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Effective address of the first operand from b1/d1 (no index reg).  */
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Effective address from x2/b2/d2, placed in addr1.  */
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5479 /* ====================================================================== */
5480 /* The "INput 2" generators. These load the second operand to an insn. */
/* Second-operand loaders.  Same suffix conventions as in1_*; in addition
   a2/ra2 compute effective addresses into in2, m2_* load through that
   address (the *a variants with alignment check, softmmu only), ri2/mri2
   are PC-relative, sh32/sh64 are shift amounts, and the i2_* family
   materializes immediates.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* r1 holds the high word, r1+1 the low word.  */
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* r2 == 0 means "no operand"; leave o->in2 unset in that case.  */
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* A 128-bit second operand consumes both in1 and in2.  */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Address held directly in register r2.  */
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* PC-relative address: i2 is a signed halfword offset.  */
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Alignment-checked 32-bit load.  */
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Alignment-checked 64-bit load.  */
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended immediate shifted left by the per-insn data amount.  */
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* The left-aligned raw instruction image itself, for e.g. DIAG.  */
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
5782 /* ====================================================================== */
5784 /* Find opc within the table of insns. This is formulated as a switch
5785 statement so that (1) we get compile-time notice of cut-paste errors
5786 for duplicated opcodes, and (2) the compiler generates the binary
5787 search tree, rather than us having to post-process the table. */
5789 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5790 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5792 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5794 enum DisasInsnEnum {
5795 #include "insn-data.def"
5798 #undef D
5799 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5800 .opc = OPC, \
5801 .fmt = FMT_##FT, \
5802 .fac = FAC_##FC, \
5803 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5804 .name = #NM, \
5805 .help_in1 = in1_##I1, \
5806 .help_in2 = in2_##I2, \
5807 .help_prep = prep_##P, \
5808 .help_wout = wout_##W, \
5809 .help_cout = cout_##CC, \
5810 .help_op = op_##OP, \
5811 .data = D \
5814 /* Allow 0 to be used for NULL in the table below. */
5815 #define in1_0 NULL
5816 #define in2_0 NULL
5817 #define prep_0 NULL
5818 #define wout_0 NULL
5819 #define cout_0 NULL
5820 #define op_0 NULL
5822 #define SPEC_in1_0 0
5823 #define SPEC_in2_0 0
5824 #define SPEC_prep_0 0
5825 #define SPEC_wout_0 0
5827 /* Give smaller names to the various facilities. */
5828 #define FAC_Z S390_FEAT_ZARCH
5829 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5830 #define FAC_DFP S390_FEAT_DFP
5831 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5832 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5833 #define FAC_EE S390_FEAT_EXECUTE_EXT
5834 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5835 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5836 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5837 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5838 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5839 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5840 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5841 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5842 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5843 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5844 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5845 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5846 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5847 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5848 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5849 #define FAC_SFLE S390_FEAT_STFLE
5850 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5851 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5852 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5853 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5854 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5855 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5856 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5857 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5858 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5859 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5860 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5861 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5862 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5863 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
5864 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
5865 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
/* The master table of instruction info, one entry per D() line in
   insn-data.def (D currently expands to a DisasInsn initializer).  */
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined (op << 8 | op2) opcode to its DisasInsn, or NULL if
   unknown.  Re-including insn-data.def with D redefined as a case label
   lets the compiler build the dispatch (and diagnose duplicate opcodes).  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size 0 marks an unused field slot in the format table.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;        /* sign-extend from bit (size-1) */
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw field is dh:dl (8:12); reassemble as dh(signed)<<12 | dl. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
5927 /* Lookup the insn at the current PC, extracting the operands into O and
5928 returning the info struct for the insn. Returns NULL for invalid insn. */
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for an invalid
   or unimplemented opcode.  Also updates s->ilen and s->next_pc.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE: the left-aligned modified
           instruction image in the high bits, its length in the low nibble. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* The first byte determines the instruction length (2/4/6);
           assemble the full image left-aligned in INSN.  */
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;   /* second byte */
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;  /* low nibble of second byte */
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;                   /* no secondary opcode */
        break;
    default:
        op2 = (insn << 40) >> 56;  /* byte at bit 40 */
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Translate a single instruction at s->pc: decode it, check the
   specification-exception constraints declared by its table entry, then
   run the in1/in2/prep/op/wout/cout helper pipeline and free whatever
   temporaries it produced.  Returns the op's exit status (or
   EXIT_NORETURN on illegal opcode / specification exception).  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* PER instruction-fetch event tracing.  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            /* Valid 128-bit float register pairs are f0/f1/.../f13.  */
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    o.out = NULL;
    o.out2 = NULL;
    o.in1 = NULL;
    o.in2 = NULL;
    o.addr1 = NULL;

    /* Implement the instruction.  Each stage is optional per table entry. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       TCG globals (register aliases) that must not be freed.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
/*
 * Translate a block of guest code into a TranslationBlock.
 *
 * Repeatedly calls translate_one() until an instruction forces an exit,
 * a page boundary is crossed, single-stepping is requested, or the
 * instruction budget is exhausted; then emits the TB epilogue.
 */
6161 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
6163 CPUS390XState *env = cs->env_ptr;
6164 DisasContext dc;
6165 target_ulong pc_start;
6166 uint64_t next_page_start;
6167 int num_insns, max_insns;
6168 ExitStatus status;
6169 bool do_debug;
6171 pc_start = tb->pc;
6173 /* 31-bit mode */
/* In 31-bit addressing mode only the low 31 bits of the PSW address
   are significant. */
6174 if (!(tb->flags & FLAG_MASK_64)) {
6175 pc_start &= 0x7fffffff;
6178 dc.tb = tb;
6179 dc.pc = pc_start;
6180 dc.cc_op = CC_OP_DYNAMIC;
/* cs_base carries the EXECUTE target insn, if any — nonzero ex_value
   means we are translating an EXECUTE'd instruction. */
6181 dc.ex_value = tb->cs_base;
6182 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
6184 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
6186 num_insns = 0;
6187 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
6188 if (max_insns == 0) {
6189 max_insns = CF_COUNT_MASK;
6191 if (max_insns > TCG_MAX_INSNS) {
6192 max_insns = TCG_MAX_INSNS;
6195 gen_tb_start(tb);
6197 do {
6198 tcg_gen_insn_start(dc.pc, dc.cc_op);
6199 num_insns++;
6201 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
6202 status = EXIT_PC_STALE;
6203 do_debug = true;
6204 /* The address covered by the breakpoint must be included in
6205 [tb->pc, tb->pc + tb->size) in order for it to be
6206 properly cleared -- thus we increment the PC here so that
6207 the logic setting tb->size below does the right thing. */
6208 dc.pc += 2;
6209 break;
/* Flush pending I/O before the last insn of an icount TB. */
6212 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
6213 gen_io_start();
6216 status = translate_one(env, &dc);
6218 /* If we reach a page boundary, are single stepping,
6219 or exhaust instruction count, stop generation. */
6220 if (status == NO_EXIT
6221 && (dc.pc >= next_page_start
6222 || tcg_op_buf_full()
6223 || num_insns >= max_insns
6224 || singlestep
6225 || cs->singlestep_enabled
6226 || dc.ex_value)) {
6227 status = EXIT_PC_STALE;
6229 } while (status == NO_EXIT);
6231 if (tb_cflags(tb) & CF_LAST_IO) {
6232 gen_io_end();
/* Emit the TB epilogue appropriate to how translation stopped. */
6235 switch (status) {
6236 case EXIT_GOTO_TB:
6237 case EXIT_NORETURN:
6238 break;
6239 case EXIT_PC_STALE:
6240 case EXIT_PC_STALE_NOCHAIN:
6241 update_psw_addr(&dc);
6242 /* FALLTHRU */
6243 case EXIT_PC_UPDATED:
6244 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6245 cc op type is in env */
6246 update_cc_op(&dc);
6247 /* FALLTHRU */
6248 case EXIT_PC_CC_UPDATED:
6249 /* Exit the TB, either by raising a debug exception or by return. */
6250 if (do_debug) {
6251 gen_exception(EXCP_DEBUG);
6252 } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
6253 tcg_gen_exit_tb(0);
6254 } else {
6255 tcg_gen_lookup_and_goto_ptr();
6257 break;
6258 default:
6259 g_assert_not_reached();
6262 gen_tb_end(tb, num_insns);
6264 tb->size = dc.pc - pc_start;
6265 tb->icount = num_insns;
6267 #if defined(S390X_DEBUG_DISAS)
6268 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
6269 && qemu_log_in_addr_range(pc_start)) {
6270 qemu_log_lock();
6271 if (unlikely(dc.ex_value)) {
6272 /* ??? Unfortunately log_target_disas can't use host memory. */
6273 qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
6274 } else {
6275 qemu_log("IN: %s\n", lookup_symbol(pc_start));
6276 log_target_disas(cs, pc_start, dc.pc - pc_start);
6277 qemu_log("\n");
6279 qemu_log_unlock();
6281 #endif
6284 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6285 target_ulong *data)
6287 int cc_op = data[1];
6288 env->psw.addr = data[0];
6289 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6290 env->cc_op = cc_op;