/*
 * s390x/tcg: wire up pci instructions
 * Source: qemu/ar7.git — target/s390x/translate.c
 * blob b470d691d320b046fdd494486b129a8351fcfe95
 */
/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* Compile-time debug switches for the disassembler/translator. */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS compiles away entirely unless verbose disas debugging is on. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
45 #include "exec/log.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t ex_value;
58 uint64_t pc, next_pc;
59 uint32_t ilen;
60 enum cc_op cc_op;
61 bool singlestep_enabled;
64 /* Information carried about a condition to be evaluated. */
65 typedef struct {
66 TCGCond cond:8;
67 bool is_64;
68 bool g1;
69 bool g2;
70 union {
71 struct { TCGv_i64 a, b; } s64;
72 struct { TCGv_i32 a, b; } s32;
73 } u;
74 } DisasCompare;
/* is_jmp field values */
#define DISAS_EXCP DISAS_TARGET_0

/* Hit/miss counters for inline branch translation (debug builds only). */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
91 return pc;
94 static TCGv_i64 psw_addr;
95 static TCGv_i64 psw_mask;
96 static TCGv_i64 gbea;
98 static TCGv_i32 cc_op;
99 static TCGv_i64 cc_src;
100 static TCGv_i64 cc_dst;
101 static TCGv_i64 cc_vr;
103 static char cpu_reg_names[32][4];
104 static TCGv_i64 regs[16];
105 static TCGv_i64 fregs[16];
107 void s390x_translate_init(void)
109 int i;
111 psw_addr = tcg_global_mem_new_i64(cpu_env,
112 offsetof(CPUS390XState, psw.addr),
113 "psw_addr");
114 psw_mask = tcg_global_mem_new_i64(cpu_env,
115 offsetof(CPUS390XState, psw.mask),
116 "psw_mask");
117 gbea = tcg_global_mem_new_i64(cpu_env,
118 offsetof(CPUS390XState, gbea),
119 "gbea");
121 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
122 "cc_op");
123 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
124 "cc_src");
125 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
126 "cc_dst");
127 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
128 "cc_vr");
130 for (i = 0; i < 16; i++) {
131 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
132 regs[i] = tcg_global_mem_new(cpu_env,
133 offsetof(CPUS390XState, regs[i]),
134 cpu_reg_names[i]);
137 for (i = 0; i < 16; i++) {
138 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
139 fregs[i] = tcg_global_mem_new(cpu_env,
140 offsetof(CPUS390XState, vregs[i][0].d),
141 cpu_reg_names[i + 16]);
145 static TCGv_i64 load_reg(int reg)
147 TCGv_i64 r = tcg_temp_new_i64();
148 tcg_gen_mov_i64(r, regs[reg]);
149 return r;
152 static TCGv_i64 load_freg32_i64(int reg)
154 TCGv_i64 r = tcg_temp_new_i64();
155 tcg_gen_shri_i64(r, fregs[reg], 32);
156 return r;
159 static void store_reg(int reg, TCGv_i64 v)
161 tcg_gen_mov_i64(regs[reg], v);
164 static void store_freg(int reg, TCGv_i64 v)
166 tcg_gen_mov_i64(fregs[reg], v);
169 static void store_reg32_i64(int reg, TCGv_i64 v)
171 /* 32 bit register writes keep the upper half */
172 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
175 static void store_reg32h_i64(int reg, TCGv_i64 v)
177 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
180 static void store_freg32_i64(int reg, TCGv_i64 v)
182 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
185 static void return_low128(TCGv_i64 dest)
187 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
190 static void update_psw_addr(DisasContext *s)
192 /* psw.addr */
193 tcg_gen_movi_i64(psw_addr, s->pc);
196 static void per_branch(DisasContext *s, bool to_next)
198 #ifndef CONFIG_USER_ONLY
199 tcg_gen_movi_i64(gbea, s->pc);
201 if (s->tb->flags & FLAG_MASK_PER) {
202 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
203 gen_helper_per_branch(cpu_env, gbea, next_pc);
204 if (to_next) {
205 tcg_temp_free_i64(next_pc);
208 #endif
211 static void per_branch_cond(DisasContext *s, TCGCond cond,
212 TCGv_i64 arg1, TCGv_i64 arg2)
214 #ifndef CONFIG_USER_ONLY
215 if (s->tb->flags & FLAG_MASK_PER) {
216 TCGLabel *lab = gen_new_label();
217 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
219 tcg_gen_movi_i64(gbea, s->pc);
220 gen_helper_per_branch(cpu_env, gbea, psw_addr);
222 gen_set_label(lab);
223 } else {
224 TCGv_i64 pc = tcg_const_i64(s->pc);
225 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
226 tcg_temp_free_i64(pc);
228 #endif
231 static void per_breaking_event(DisasContext *s)
233 tcg_gen_movi_i64(gbea, s->pc);
236 static void update_cc_op(DisasContext *s)
238 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
239 tcg_gen_movi_i32(cc_op, s->cc_op);
243 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
245 return (uint64_t)cpu_lduw_code(env, pc);
248 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
250 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
253 static int get_mem_index(DisasContext *s)
255 switch (s->tb->flags & FLAG_MASK_ASC) {
256 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
257 return 0;
258 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
259 return 1;
260 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
261 return 2;
262 default:
263 tcg_abort();
264 break;
268 static void gen_exception(int excp)
270 TCGv_i32 tmp = tcg_const_i32(excp);
271 gen_helper_exception(cpu_env, tmp);
272 tcg_temp_free_i32(tmp);
275 static void gen_program_exception(DisasContext *s, int code)
277 TCGv_i32 tmp;
279 /* Remember what pgm exeption this was. */
280 tmp = tcg_const_i32(code);
281 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
282 tcg_temp_free_i32(tmp);
284 tmp = tcg_const_i32(s->ilen);
285 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
286 tcg_temp_free_i32(tmp);
288 /* update the psw */
289 update_psw_addr(s);
291 /* Save off cc. */
292 update_cc_op(s);
294 /* Trigger exception. */
295 gen_exception(EXCP_PGM);
298 static inline void gen_illegal_opcode(DisasContext *s)
300 gen_program_exception(s, PGM_OPERATION);
303 static inline void gen_trap(DisasContext *s)
305 TCGv_i32 t;
307 /* Set DXC to 0xff. */
308 t = tcg_temp_new_i32();
309 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
310 tcg_gen_ori_i32(t, t, 0xff00);
311 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
312 tcg_temp_free_i32(t);
314 gen_program_exception(s, PGM_DATA);
317 #ifndef CONFIG_USER_ONLY
318 static void check_privileged(DisasContext *s)
320 if (s->tb->flags & FLAG_MASK_PSTATE) {
321 gen_program_exception(s, PGM_PRIVILEGED);
324 #endif
326 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
328 TCGv_i64 tmp = tcg_temp_new_i64();
329 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
331 /* Note that d2 is limited to 20 bits, signed. If we crop negative
332 displacements early we create larger immedate addends. */
334 /* Note that addi optimizes the imm==0 case. */
335 if (b2 && x2) {
336 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
337 tcg_gen_addi_i64(tmp, tmp, d2);
338 } else if (b2) {
339 tcg_gen_addi_i64(tmp, regs[b2], d2);
340 } else if (x2) {
341 tcg_gen_addi_i64(tmp, regs[x2], d2);
342 } else {
343 if (need_31) {
344 d2 &= 0x7fffffff;
345 need_31 = false;
347 tcg_gen_movi_i64(tmp, d2);
349 if (need_31) {
350 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
353 return tmp;
356 static inline bool live_cc_data(DisasContext *s)
358 return (s->cc_op != CC_OP_DYNAMIC
359 && s->cc_op != CC_OP_STATIC
360 && s->cc_op > 3);
363 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
365 if (live_cc_data(s)) {
366 tcg_gen_discard_i64(cc_src);
367 tcg_gen_discard_i64(cc_dst);
368 tcg_gen_discard_i64(cc_vr);
370 s->cc_op = CC_OP_CONST0 + val;
373 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
375 if (live_cc_data(s)) {
376 tcg_gen_discard_i64(cc_src);
377 tcg_gen_discard_i64(cc_vr);
379 tcg_gen_mov_i64(cc_dst, dst);
380 s->cc_op = op;
383 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
384 TCGv_i64 dst)
386 if (live_cc_data(s)) {
387 tcg_gen_discard_i64(cc_vr);
389 tcg_gen_mov_i64(cc_src, src);
390 tcg_gen_mov_i64(cc_dst, dst);
391 s->cc_op = op;
394 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
395 TCGv_i64 dst, TCGv_i64 vr)
397 tcg_gen_mov_i64(cc_src, src);
398 tcg_gen_mov_i64(cc_dst, dst);
399 tcg_gen_mov_i64(cc_vr, vr);
400 s->cc_op = op;
403 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
405 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
408 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
410 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
413 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
415 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
418 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
420 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
423 /* CC value is in env->cc_op */
424 static void set_cc_static(DisasContext *s)
426 if (live_cc_data(s)) {
427 tcg_gen_discard_i64(cc_src);
428 tcg_gen_discard_i64(cc_dst);
429 tcg_gen_discard_i64(cc_vr);
431 s->cc_op = CC_OP_STATIC;
434 /* calculates cc into cc_op */
435 static void gen_op_calc_cc(DisasContext *s)
437 TCGv_i32 local_cc_op = NULL;
438 TCGv_i64 dummy = NULL;
440 switch (s->cc_op) {
441 default:
442 dummy = tcg_const_i64(0);
443 /* FALLTHRU */
444 case CC_OP_ADD_64:
445 case CC_OP_ADDU_64:
446 case CC_OP_ADDC_64:
447 case CC_OP_SUB_64:
448 case CC_OP_SUBU_64:
449 case CC_OP_SUBB_64:
450 case CC_OP_ADD_32:
451 case CC_OP_ADDU_32:
452 case CC_OP_ADDC_32:
453 case CC_OP_SUB_32:
454 case CC_OP_SUBU_32:
455 case CC_OP_SUBB_32:
456 local_cc_op = tcg_const_i32(s->cc_op);
457 break;
458 case CC_OP_CONST0:
459 case CC_OP_CONST1:
460 case CC_OP_CONST2:
461 case CC_OP_CONST3:
462 case CC_OP_STATIC:
463 case CC_OP_DYNAMIC:
464 break;
467 switch (s->cc_op) {
468 case CC_OP_CONST0:
469 case CC_OP_CONST1:
470 case CC_OP_CONST2:
471 case CC_OP_CONST3:
472 /* s->cc_op is the cc value */
473 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
474 break;
475 case CC_OP_STATIC:
476 /* env->cc_op already is the cc value */
477 break;
478 case CC_OP_NZ:
479 case CC_OP_ABS_64:
480 case CC_OP_NABS_64:
481 case CC_OP_ABS_32:
482 case CC_OP_NABS_32:
483 case CC_OP_LTGT0_32:
484 case CC_OP_LTGT0_64:
485 case CC_OP_COMP_32:
486 case CC_OP_COMP_64:
487 case CC_OP_NZ_F32:
488 case CC_OP_NZ_F64:
489 case CC_OP_FLOGR:
490 /* 1 argument */
491 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
492 break;
493 case CC_OP_ICM:
494 case CC_OP_LTGT_32:
495 case CC_OP_LTGT_64:
496 case CC_OP_LTUGTU_32:
497 case CC_OP_LTUGTU_64:
498 case CC_OP_TM_32:
499 case CC_OP_TM_64:
500 case CC_OP_SLA_32:
501 case CC_OP_SLA_64:
502 case CC_OP_NZ_F128:
503 /* 2 arguments */
504 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
505 break;
506 case CC_OP_ADD_64:
507 case CC_OP_ADDU_64:
508 case CC_OP_ADDC_64:
509 case CC_OP_SUB_64:
510 case CC_OP_SUBU_64:
511 case CC_OP_SUBB_64:
512 case CC_OP_ADD_32:
513 case CC_OP_ADDU_32:
514 case CC_OP_ADDC_32:
515 case CC_OP_SUB_32:
516 case CC_OP_SUBU_32:
517 case CC_OP_SUBB_32:
518 /* 3 arguments */
519 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
520 break;
521 case CC_OP_DYNAMIC:
522 /* unknown operation - assume 3 arguments and cc_op in env */
523 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
524 break;
525 default:
526 tcg_abort();
529 if (local_cc_op) {
530 tcg_temp_free_i32(local_cc_op);
532 if (dummy) {
533 tcg_temp_free_i64(dummy);
536 /* We now have cc in cc_op as constant */
537 set_cc_static(s);
540 static bool use_exit_tb(DisasContext *s)
542 return (s->singlestep_enabled ||
543 (tb_cflags(s->tb) & CF_LAST_IO) ||
544 (s->tb->flags & FLAG_MASK_PER));
547 static bool use_goto_tb(DisasContext *s, uint64_t dest)
549 if (unlikely(use_exit_tb(s))) {
550 return false;
552 #ifndef CONFIG_USER_ONLY
553 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
554 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
555 #else
556 return true;
557 #endif
560 static void account_noninline_branch(DisasContext *s, int cc_op)
562 #ifdef DEBUG_INLINE_BRANCHES
563 inline_branch_miss[cc_op]++;
564 #endif
567 static void account_inline_branch(DisasContext *s, int cc_op)
569 #ifdef DEBUG_INLINE_BRANCHES
570 inline_branch_hit[cc_op]++;
571 #endif
574 /* Table of mask values to comparison codes, given a comparison as input.
575 For such, CC=3 should not be possible. */
576 static const TCGCond ltgt_cond[16] = {
577 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
578 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
579 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
580 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
581 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
582 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
583 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
584 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
587 /* Table of mask values to comparison codes, given a logic op as input.
588 For such, only CC=0 and CC=1 should be possible. */
589 static const TCGCond nz_cond[16] = {
590 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
591 TCG_COND_NEVER, TCG_COND_NEVER,
592 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
593 TCG_COND_NE, TCG_COND_NE,
594 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
595 TCG_COND_EQ, TCG_COND_EQ,
596 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
597 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
600 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
601 details required to generate a TCG comparison. */
602 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
604 TCGCond cond;
605 enum cc_op old_cc_op = s->cc_op;
607 if (mask == 15 || mask == 0) {
608 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
609 c->u.s32.a = cc_op;
610 c->u.s32.b = cc_op;
611 c->g1 = c->g2 = true;
612 c->is_64 = false;
613 return;
616 /* Find the TCG condition for the mask + cc op. */
617 switch (old_cc_op) {
618 case CC_OP_LTGT0_32:
619 case CC_OP_LTGT0_64:
620 case CC_OP_LTGT_32:
621 case CC_OP_LTGT_64:
622 cond = ltgt_cond[mask];
623 if (cond == TCG_COND_NEVER) {
624 goto do_dynamic;
626 account_inline_branch(s, old_cc_op);
627 break;
629 case CC_OP_LTUGTU_32:
630 case CC_OP_LTUGTU_64:
631 cond = tcg_unsigned_cond(ltgt_cond[mask]);
632 if (cond == TCG_COND_NEVER) {
633 goto do_dynamic;
635 account_inline_branch(s, old_cc_op);
636 break;
638 case CC_OP_NZ:
639 cond = nz_cond[mask];
640 if (cond == TCG_COND_NEVER) {
641 goto do_dynamic;
643 account_inline_branch(s, old_cc_op);
644 break;
646 case CC_OP_TM_32:
647 case CC_OP_TM_64:
648 switch (mask) {
649 case 8:
650 cond = TCG_COND_EQ;
651 break;
652 case 4 | 2 | 1:
653 cond = TCG_COND_NE;
654 break;
655 default:
656 goto do_dynamic;
658 account_inline_branch(s, old_cc_op);
659 break;
661 case CC_OP_ICM:
662 switch (mask) {
663 case 8:
664 cond = TCG_COND_EQ;
665 break;
666 case 4 | 2 | 1:
667 case 4 | 2:
668 cond = TCG_COND_NE;
669 break;
670 default:
671 goto do_dynamic;
673 account_inline_branch(s, old_cc_op);
674 break;
676 case CC_OP_FLOGR:
677 switch (mask & 0xa) {
678 case 8: /* src == 0 -> no one bit found */
679 cond = TCG_COND_EQ;
680 break;
681 case 2: /* src != 0 -> one bit found */
682 cond = TCG_COND_NE;
683 break;
684 default:
685 goto do_dynamic;
687 account_inline_branch(s, old_cc_op);
688 break;
690 case CC_OP_ADDU_32:
691 case CC_OP_ADDU_64:
692 switch (mask) {
693 case 8 | 2: /* vr == 0 */
694 cond = TCG_COND_EQ;
695 break;
696 case 4 | 1: /* vr != 0 */
697 cond = TCG_COND_NE;
698 break;
699 case 8 | 4: /* no carry -> vr >= src */
700 cond = TCG_COND_GEU;
701 break;
702 case 2 | 1: /* carry -> vr < src */
703 cond = TCG_COND_LTU;
704 break;
705 default:
706 goto do_dynamic;
708 account_inline_branch(s, old_cc_op);
709 break;
711 case CC_OP_SUBU_32:
712 case CC_OP_SUBU_64:
713 /* Note that CC=0 is impossible; treat it as dont-care. */
714 switch (mask & 7) {
715 case 2: /* zero -> op1 == op2 */
716 cond = TCG_COND_EQ;
717 break;
718 case 4 | 1: /* !zero -> op1 != op2 */
719 cond = TCG_COND_NE;
720 break;
721 case 4: /* borrow (!carry) -> op1 < op2 */
722 cond = TCG_COND_LTU;
723 break;
724 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
725 cond = TCG_COND_GEU;
726 break;
727 default:
728 goto do_dynamic;
730 account_inline_branch(s, old_cc_op);
731 break;
733 default:
734 do_dynamic:
735 /* Calculate cc value. */
736 gen_op_calc_cc(s);
737 /* FALLTHRU */
739 case CC_OP_STATIC:
740 /* Jump based on CC. We'll load up the real cond below;
741 the assignment here merely avoids a compiler warning. */
742 account_noninline_branch(s, old_cc_op);
743 old_cc_op = CC_OP_STATIC;
744 cond = TCG_COND_NEVER;
745 break;
748 /* Load up the arguments of the comparison. */
749 c->is_64 = true;
750 c->g1 = c->g2 = false;
751 switch (old_cc_op) {
752 case CC_OP_LTGT0_32:
753 c->is_64 = false;
754 c->u.s32.a = tcg_temp_new_i32();
755 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
756 c->u.s32.b = tcg_const_i32(0);
757 break;
758 case CC_OP_LTGT_32:
759 case CC_OP_LTUGTU_32:
760 case CC_OP_SUBU_32:
761 c->is_64 = false;
762 c->u.s32.a = tcg_temp_new_i32();
763 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
764 c->u.s32.b = tcg_temp_new_i32();
765 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
766 break;
768 case CC_OP_LTGT0_64:
769 case CC_OP_NZ:
770 case CC_OP_FLOGR:
771 c->u.s64.a = cc_dst;
772 c->u.s64.b = tcg_const_i64(0);
773 c->g1 = true;
774 break;
775 case CC_OP_LTGT_64:
776 case CC_OP_LTUGTU_64:
777 case CC_OP_SUBU_64:
778 c->u.s64.a = cc_src;
779 c->u.s64.b = cc_dst;
780 c->g1 = c->g2 = true;
781 break;
783 case CC_OP_TM_32:
784 case CC_OP_TM_64:
785 case CC_OP_ICM:
786 c->u.s64.a = tcg_temp_new_i64();
787 c->u.s64.b = tcg_const_i64(0);
788 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
789 break;
791 case CC_OP_ADDU_32:
792 c->is_64 = false;
793 c->u.s32.a = tcg_temp_new_i32();
794 c->u.s32.b = tcg_temp_new_i32();
795 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
796 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
797 tcg_gen_movi_i32(c->u.s32.b, 0);
798 } else {
799 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
801 break;
803 case CC_OP_ADDU_64:
804 c->u.s64.a = cc_vr;
805 c->g1 = true;
806 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
807 c->u.s64.b = tcg_const_i64(0);
808 } else {
809 c->u.s64.b = cc_src;
810 c->g2 = true;
812 break;
814 case CC_OP_STATIC:
815 c->is_64 = false;
816 c->u.s32.a = cc_op;
817 c->g1 = true;
818 switch (mask) {
819 case 0x8 | 0x4 | 0x2: /* cc != 3 */
820 cond = TCG_COND_NE;
821 c->u.s32.b = tcg_const_i32(3);
822 break;
823 case 0x8 | 0x4 | 0x1: /* cc != 2 */
824 cond = TCG_COND_NE;
825 c->u.s32.b = tcg_const_i32(2);
826 break;
827 case 0x8 | 0x2 | 0x1: /* cc != 1 */
828 cond = TCG_COND_NE;
829 c->u.s32.b = tcg_const_i32(1);
830 break;
831 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
832 cond = TCG_COND_EQ;
833 c->g1 = false;
834 c->u.s32.a = tcg_temp_new_i32();
835 c->u.s32.b = tcg_const_i32(0);
836 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
837 break;
838 case 0x8 | 0x4: /* cc < 2 */
839 cond = TCG_COND_LTU;
840 c->u.s32.b = tcg_const_i32(2);
841 break;
842 case 0x8: /* cc == 0 */
843 cond = TCG_COND_EQ;
844 c->u.s32.b = tcg_const_i32(0);
845 break;
846 case 0x4 | 0x2 | 0x1: /* cc != 0 */
847 cond = TCG_COND_NE;
848 c->u.s32.b = tcg_const_i32(0);
849 break;
850 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
851 cond = TCG_COND_NE;
852 c->g1 = false;
853 c->u.s32.a = tcg_temp_new_i32();
854 c->u.s32.b = tcg_const_i32(0);
855 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
856 break;
857 case 0x4: /* cc == 1 */
858 cond = TCG_COND_EQ;
859 c->u.s32.b = tcg_const_i32(1);
860 break;
861 case 0x2 | 0x1: /* cc > 1 */
862 cond = TCG_COND_GTU;
863 c->u.s32.b = tcg_const_i32(1);
864 break;
865 case 0x2: /* cc == 2 */
866 cond = TCG_COND_EQ;
867 c->u.s32.b = tcg_const_i32(2);
868 break;
869 case 0x1: /* cc == 3 */
870 cond = TCG_COND_EQ;
871 c->u.s32.b = tcg_const_i32(3);
872 break;
873 default:
874 /* CC is masked by something else: (8 >> cc) & mask. */
875 cond = TCG_COND_NE;
876 c->g1 = false;
877 c->u.s32.a = tcg_const_i32(8);
878 c->u.s32.b = tcg_const_i32(0);
879 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
880 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
881 break;
883 break;
885 default:
886 abort();
888 c->cond = cond;
891 static void free_compare(DisasCompare *c)
893 if (!c->g1) {
894 if (c->is_64) {
895 tcg_temp_free_i64(c->u.s64.a);
896 } else {
897 tcg_temp_free_i32(c->u.s32.a);
900 if (!c->g2) {
901 if (c->is_64) {
902 tcg_temp_free_i64(c->u.s64.b);
903 } else {
904 tcg_temp_free_i32(c->u.s32.b);
909 /* ====================================================================== */
910 /* Define the insn format enumeration. */
911 #define F0(N) FMT_##N,
912 #define F1(N, X1) F0(N)
913 #define F2(N, X1, X2) F0(N)
914 #define F3(N, X1, X2, X3) F0(N)
915 #define F4(N, X1, X2, X3, X4) F0(N)
916 #define F5(N, X1, X2, X3, X4, X5) F0(N)
918 typedef enum {
919 #include "insn-format.def"
920 } DisasFormat;
922 #undef F0
923 #undef F1
924 #undef F2
925 #undef F3
926 #undef F4
927 #undef F5
929 /* Define a structure to hold the decoded fields. We'll store each inside
930 an array indexed by an enum. In order to conserve memory, we'll arrange
931 for fields that do not exist at the same time to overlap, thus the "C"
932 for compact. For checking purposes there is an "O" for original index
933 as well that will be applied to availability bitmaps. */
935 enum DisasFieldIndexO {
936 FLD_O_r1,
937 FLD_O_r2,
938 FLD_O_r3,
939 FLD_O_m1,
940 FLD_O_m3,
941 FLD_O_m4,
942 FLD_O_b1,
943 FLD_O_b2,
944 FLD_O_b4,
945 FLD_O_d1,
946 FLD_O_d2,
947 FLD_O_d4,
948 FLD_O_x2,
949 FLD_O_l1,
950 FLD_O_l2,
951 FLD_O_i1,
952 FLD_O_i2,
953 FLD_O_i3,
954 FLD_O_i4,
955 FLD_O_i5
958 enum DisasFieldIndexC {
959 FLD_C_r1 = 0,
960 FLD_C_m1 = 0,
961 FLD_C_b1 = 0,
962 FLD_C_i1 = 0,
964 FLD_C_r2 = 1,
965 FLD_C_b2 = 1,
966 FLD_C_i2 = 1,
968 FLD_C_r3 = 2,
969 FLD_C_m3 = 2,
970 FLD_C_i3 = 2,
972 FLD_C_m4 = 3,
973 FLD_C_b4 = 3,
974 FLD_C_i4 = 3,
975 FLD_C_l1 = 3,
977 FLD_C_i5 = 4,
978 FLD_C_d1 = 4,
980 FLD_C_d2 = 5,
982 FLD_C_d4 = 6,
983 FLD_C_x2 = 6,
984 FLD_C_l2 = 6,
986 NUM_C_FIELD = 7
989 struct DisasFields {
990 uint64_t raw_insn;
991 unsigned op:8;
992 unsigned op2:8;
993 unsigned presentC:16;
994 unsigned int presentO;
995 int c[NUM_C_FIELD];
998 /* This is the way fields are to be accessed out of DisasFields. */
999 #define have_field(S, F) have_field1((S), FLD_O_##F)
1000 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1002 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1004 return (f->presentO >> c) & 1;
1007 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1008 enum DisasFieldIndexC c)
1010 assert(have_field1(f, o));
1011 return f->c[c];
1014 /* Describe the layout of each field in each format. */
1015 typedef struct DisasField {
1016 unsigned int beg:8;
1017 unsigned int size:8;
1018 unsigned int type:2;
1019 unsigned int indexC:6;
1020 enum DisasFieldIndexO indexO:8;
1021 } DisasField;
1023 typedef struct DisasFormatInfo {
1024 DisasField op[NUM_C_FIELD];
1025 } DisasFormatInfo;
1027 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1028 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1029 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1030 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1031 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1032 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1033 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1034 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1035 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1036 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1037 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1038 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1039 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1040 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1042 #define F0(N) { { } },
1043 #define F1(N, X1) { { X1 } },
1044 #define F2(N, X1, X2) { { X1, X2 } },
1045 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1046 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1047 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1049 static const DisasFormatInfo format_info[] = {
1050 #include "insn-format.def"
1053 #undef F0
1054 #undef F1
1055 #undef F2
1056 #undef F3
1057 #undef F4
1058 #undef F5
1059 #undef R
1060 #undef M
1061 #undef BD
1062 #undef BXD
1063 #undef BDL
1064 #undef BXDL
1065 #undef I
1066 #undef L
1068 /* Generally, we'll extract operands into this structures, operate upon
1069 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1070 of routines below for more details. */
1071 typedef struct {
1072 bool g_out, g_out2, g_in1, g_in2;
1073 TCGv_i64 out, out2, in1, in2;
1074 TCGv_i64 addr1;
1075 } DisasOps;
1077 /* Instructions can place constraints on their operands, raising specification
1078 exceptions if they are violated. To make this easy to automate, each "in1",
1079 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1080 of the following, or 0. To make this easy to document, we'll put the
1081 SPEC_<name> defines next to <name>. */
1083 #define SPEC_r1_even 1
1084 #define SPEC_r2_even 2
1085 #define SPEC_r3_even 4
1086 #define SPEC_r1_f128 8
1087 #define SPEC_r2_f128 16
1089 /* Return values from translate_one, indicating the state of the TB. */
1090 typedef enum {
1091 /* Continue the TB. */
1092 NO_EXIT,
1093 /* We have emitted one or more goto_tb. No fixup required. */
1094 EXIT_GOTO_TB,
1095 /* We are not using a goto_tb (for whatever reason), but have updated
1096 the PC (for whatever reason), so there's no need to do it again on
1097 exiting the TB. */
1098 EXIT_PC_UPDATED,
1099 /* We have updated the PC and CC values. */
1100 EXIT_PC_CC_UPDATED,
1101 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1102 updated the PC for the next instruction to be executed. */
1103 EXIT_PC_STALE,
1104 /* We are exiting the TB to the main loop. */
1105 EXIT_PC_STALE_NOCHAIN,
1106 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1107 No following code will be executed. */
1108 EXIT_NORETURN,
1109 } ExitStatus;
1111 struct DisasInsn {
1112 unsigned opc:16;
1113 DisasFormat fmt:8;
1114 unsigned fac:8;
1115 unsigned spec:8;
1117 const char *name;
1119 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1120 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1121 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1122 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1123 void (*help_cout)(DisasContext *, DisasOps *);
1124 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1126 uint64_t data;
1129 /* ====================================================================== */
1130 /* Miscellaneous helpers, used by several operations. */
1132 static void help_l2_shift(DisasContext *s, DisasFields *f,
1133 DisasOps *o, int mask)
1135 int b2 = get_field(f, b2);
1136 int d2 = get_field(f, d2);
1138 if (b2 == 0) {
1139 o->in2 = tcg_const_i64(d2 & mask);
1140 } else {
1141 o->in2 = get_address(s, 0, b2, d2);
1142 tcg_gen_andi_i64(o->in2, o->in2, mask);
1146 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1148 if (dest == s->next_pc) {
1149 per_branch(s, true);
1150 return NO_EXIT;
1152 if (use_goto_tb(s, dest)) {
1153 update_cc_op(s);
1154 per_breaking_event(s);
1155 tcg_gen_goto_tb(0);
1156 tcg_gen_movi_i64(psw_addr, dest);
1157 tcg_gen_exit_tb((uintptr_t)s->tb);
1158 return EXIT_GOTO_TB;
1159 } else {
1160 tcg_gen_movi_i64(psw_addr, dest);
1161 per_branch(s, false);
1162 return EXIT_PC_UPDATED;
1166 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1167 bool is_imm, int imm, TCGv_i64 cdest)
1169 ExitStatus ret;
1170 uint64_t dest = s->pc + 2 * imm;
1171 TCGLabel *lab;
1173 /* Take care of the special cases first. */
1174 if (c->cond == TCG_COND_NEVER) {
1175 ret = NO_EXIT;
1176 goto egress;
1178 if (is_imm) {
1179 if (dest == s->next_pc) {
1180 /* Branch to next. */
1181 per_branch(s, true);
1182 ret = NO_EXIT;
1183 goto egress;
1185 if (c->cond == TCG_COND_ALWAYS) {
1186 ret = help_goto_direct(s, dest);
1187 goto egress;
1189 } else {
1190 if (!cdest) {
1191 /* E.g. bcr %r0 -> no branch. */
1192 ret = NO_EXIT;
1193 goto egress;
1195 if (c->cond == TCG_COND_ALWAYS) {
1196 tcg_gen_mov_i64(psw_addr, cdest);
1197 per_branch(s, false);
1198 ret = EXIT_PC_UPDATED;
1199 goto egress;
1203 if (use_goto_tb(s, s->next_pc)) {
1204 if (is_imm && use_goto_tb(s, dest)) {
1205 /* Both exits can use goto_tb. */
1206 update_cc_op(s);
1208 lab = gen_new_label();
1209 if (c->is_64) {
1210 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1211 } else {
1212 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1215 /* Branch not taken. */
1216 tcg_gen_goto_tb(0);
1217 tcg_gen_movi_i64(psw_addr, s->next_pc);
1218 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1220 /* Branch taken. */
1221 gen_set_label(lab);
1222 per_breaking_event(s);
1223 tcg_gen_goto_tb(1);
1224 tcg_gen_movi_i64(psw_addr, dest);
1225 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1227 ret = EXIT_GOTO_TB;
1228 } else {
1229 /* Fallthru can use goto_tb, but taken branch cannot. */
1230 /* Store taken branch destination before the brcond. This
1231 avoids having to allocate a new local temp to hold it.
1232 We'll overwrite this in the not taken case anyway. */
1233 if (!is_imm) {
1234 tcg_gen_mov_i64(psw_addr, cdest);
1237 lab = gen_new_label();
1238 if (c->is_64) {
1239 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1240 } else {
1241 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1244 /* Branch not taken. */
1245 update_cc_op(s);
1246 tcg_gen_goto_tb(0);
1247 tcg_gen_movi_i64(psw_addr, s->next_pc);
1248 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1250 gen_set_label(lab);
1251 if (is_imm) {
1252 tcg_gen_movi_i64(psw_addr, dest);
1254 per_breaking_event(s);
1255 ret = EXIT_PC_UPDATED;
1257 } else {
1258 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1259 Most commonly we're single-stepping or some other condition that
1260 disables all use of goto_tb. Just update the PC and exit. */
1262 TCGv_i64 next = tcg_const_i64(s->next_pc);
1263 if (is_imm) {
1264 cdest = tcg_const_i64(dest);
1267 if (c->is_64) {
1268 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1269 cdest, next);
1270 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1271 } else {
1272 TCGv_i32 t0 = tcg_temp_new_i32();
1273 TCGv_i64 t1 = tcg_temp_new_i64();
1274 TCGv_i64 z = tcg_const_i64(0);
1275 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1276 tcg_gen_extu_i32_i64(t1, t0);
1277 tcg_temp_free_i32(t0);
1278 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1279 per_branch_cond(s, TCG_COND_NE, t1, z);
1280 tcg_temp_free_i64(t1);
1281 tcg_temp_free_i64(z);
1284 if (is_imm) {
1285 tcg_temp_free_i64(cdest);
1287 tcg_temp_free_i64(next);
1289 ret = EXIT_PC_UPDATED;
1292 egress:
1293 free_compare(c);
1294 return ret;
1297 /* ====================================================================== */
1298 /* The operations. These perform the bulk of the work for any insn,
1299 usually after the operands have been loaded and output initialized. */
1301 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1303 TCGv_i64 z, n;
1304 z = tcg_const_i64(0);
1305 n = tcg_temp_new_i64();
1306 tcg_gen_neg_i64(n, o->in2);
1307 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1308 tcg_temp_free_i64(n);
1309 tcg_temp_free_i64(z);
1310 return NO_EXIT;
1313 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1315 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1316 return NO_EXIT;
1319 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1321 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1322 return NO_EXIT;
1325 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1327 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1328 tcg_gen_mov_i64(o->out2, o->in2);
1329 return NO_EXIT;
1332 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1334 tcg_gen_add_i64(o->out, o->in1, o->in2);
1335 return NO_EXIT;
1338 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1340 DisasCompare cmp;
1341 TCGv_i64 carry;
1343 tcg_gen_add_i64(o->out, o->in1, o->in2);
1345 /* The carry flag is the msb of CC, therefore the branch mask that would
1346 create that comparison is 3. Feeding the generated comparison to
1347 setcond produces the carry flag that we desire. */
1348 disas_jcc(s, &cmp, 3);
1349 carry = tcg_temp_new_i64();
1350 if (cmp.is_64) {
1351 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1352 } else {
1353 TCGv_i32 t = tcg_temp_new_i32();
1354 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1355 tcg_gen_extu_i32_i64(carry, t);
1356 tcg_temp_free_i32(t);
1358 free_compare(&cmp);
1360 tcg_gen_add_i64(o->out, o->out, carry);
1361 tcg_temp_free_i64(carry);
1362 return NO_EXIT;
1365 static ExitStatus op_asi(DisasContext *s, DisasOps *o)
1367 o->in1 = tcg_temp_new_i64();
1369 if (!s390_has_feat(S390_FEAT_STFLE_45)) {
1370 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1371 } else {
1372 /* Perform the atomic addition in memory. */
1373 tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1374 s->insn->data);
1377 /* Recompute also for atomic case: needed for setting CC. */
1378 tcg_gen_add_i64(o->out, o->in1, o->in2);
1380 if (!s390_has_feat(S390_FEAT_STFLE_45)) {
1381 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1383 return NO_EXIT;
1386 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1388 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1389 return NO_EXIT;
1392 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1394 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1395 return NO_EXIT;
1398 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1400 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1401 return_low128(o->out2);
1402 return NO_EXIT;
1405 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1407 tcg_gen_and_i64(o->out, o->in1, o->in2);
1408 return NO_EXIT;
1411 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1413 int shift = s->insn->data & 0xff;
1414 int size = s->insn->data >> 8;
1415 uint64_t mask = ((1ull << size) - 1) << shift;
1417 assert(!o->g_in2);
1418 tcg_gen_shli_i64(o->in2, o->in2, shift);
1419 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1420 tcg_gen_and_i64(o->out, o->in1, o->in2);
1422 /* Produce the CC from only the bits manipulated. */
1423 tcg_gen_andi_i64(cc_dst, o->out, mask);
1424 set_cc_nz_u64(s, cc_dst);
1425 return NO_EXIT;
1428 static ExitStatus op_ni(DisasContext *s, DisasOps *o)
1430 o->in1 = tcg_temp_new_i64();
1432 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1433 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1434 } else {
1435 /* Perform the atomic operation in memory. */
1436 tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1437 s->insn->data);
1440 /* Recompute also for atomic case: needed for setting CC. */
1441 tcg_gen_and_i64(o->out, o->in1, o->in2);
1443 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1444 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1446 return NO_EXIT;
1449 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1451 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1452 if (o->in2) {
1453 tcg_gen_mov_i64(psw_addr, o->in2);
1454 per_branch(s, false);
1455 return EXIT_PC_UPDATED;
1456 } else {
1457 return NO_EXIT;
1461 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1463 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1464 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1467 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1469 int m1 = get_field(s->fields, m1);
1470 bool is_imm = have_field(s->fields, i2);
1471 int imm = is_imm ? get_field(s->fields, i2) : 0;
1472 DisasCompare c;
1474 /* BCR with R2 = 0 causes no branching */
1475 if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1476 if (m1 == 14) {
1477 /* Perform serialization */
1478 /* FIXME: check for fast-BCR-serialization facility */
1479 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1481 if (m1 == 15) {
1482 /* Perform serialization */
1483 /* FIXME: perform checkpoint-synchronisation */
1484 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1486 return NO_EXIT;
1489 disas_jcc(s, &c, m1);
1490 return help_branch(s, &c, is_imm, imm, o->in2);
1493 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1495 int r1 = get_field(s->fields, r1);
1496 bool is_imm = have_field(s->fields, i2);
1497 int imm = is_imm ? get_field(s->fields, i2) : 0;
1498 DisasCompare c;
1499 TCGv_i64 t;
1501 c.cond = TCG_COND_NE;
1502 c.is_64 = false;
1503 c.g1 = false;
1504 c.g2 = false;
1506 t = tcg_temp_new_i64();
1507 tcg_gen_subi_i64(t, regs[r1], 1);
1508 store_reg32_i64(r1, t);
1509 c.u.s32.a = tcg_temp_new_i32();
1510 c.u.s32.b = tcg_const_i32(0);
1511 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1512 tcg_temp_free_i64(t);
1514 return help_branch(s, &c, is_imm, imm, o->in2);
1517 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1519 int r1 = get_field(s->fields, r1);
1520 int imm = get_field(s->fields, i2);
1521 DisasCompare c;
1522 TCGv_i64 t;
1524 c.cond = TCG_COND_NE;
1525 c.is_64 = false;
1526 c.g1 = false;
1527 c.g2 = false;
1529 t = tcg_temp_new_i64();
1530 tcg_gen_shri_i64(t, regs[r1], 32);
1531 tcg_gen_subi_i64(t, t, 1);
1532 store_reg32h_i64(r1, t);
1533 c.u.s32.a = tcg_temp_new_i32();
1534 c.u.s32.b = tcg_const_i32(0);
1535 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1536 tcg_temp_free_i64(t);
1538 return help_branch(s, &c, 1, imm, o->in2);
1541 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1543 int r1 = get_field(s->fields, r1);
1544 bool is_imm = have_field(s->fields, i2);
1545 int imm = is_imm ? get_field(s->fields, i2) : 0;
1546 DisasCompare c;
1548 c.cond = TCG_COND_NE;
1549 c.is_64 = true;
1550 c.g1 = true;
1551 c.g2 = false;
1553 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1554 c.u.s64.a = regs[r1];
1555 c.u.s64.b = tcg_const_i64(0);
1557 return help_branch(s, &c, is_imm, imm, o->in2);
1560 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1562 int r1 = get_field(s->fields, r1);
1563 int r3 = get_field(s->fields, r3);
1564 bool is_imm = have_field(s->fields, i2);
1565 int imm = is_imm ? get_field(s->fields, i2) : 0;
1566 DisasCompare c;
1567 TCGv_i64 t;
1569 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1570 c.is_64 = false;
1571 c.g1 = false;
1572 c.g2 = false;
1574 t = tcg_temp_new_i64();
1575 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1576 c.u.s32.a = tcg_temp_new_i32();
1577 c.u.s32.b = tcg_temp_new_i32();
1578 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1579 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1580 store_reg32_i64(r1, t);
1581 tcg_temp_free_i64(t);
1583 return help_branch(s, &c, is_imm, imm, o->in2);
1586 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1588 int r1 = get_field(s->fields, r1);
1589 int r3 = get_field(s->fields, r3);
1590 bool is_imm = have_field(s->fields, i2);
1591 int imm = is_imm ? get_field(s->fields, i2) : 0;
1592 DisasCompare c;
1594 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1595 c.is_64 = true;
1597 if (r1 == (r3 | 1)) {
1598 c.u.s64.b = load_reg(r3 | 1);
1599 c.g2 = false;
1600 } else {
1601 c.u.s64.b = regs[r3 | 1];
1602 c.g2 = true;
1605 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1606 c.u.s64.a = regs[r1];
1607 c.g1 = true;
1609 return help_branch(s, &c, is_imm, imm, o->in2);
1612 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1614 int imm, m3 = get_field(s->fields, m3);
1615 bool is_imm;
1616 DisasCompare c;
1618 c.cond = ltgt_cond[m3];
1619 if (s->insn->data) {
1620 c.cond = tcg_unsigned_cond(c.cond);
1622 c.is_64 = c.g1 = c.g2 = true;
1623 c.u.s64.a = o->in1;
1624 c.u.s64.b = o->in2;
1626 is_imm = have_field(s->fields, i4);
1627 if (is_imm) {
1628 imm = get_field(s->fields, i4);
1629 } else {
1630 imm = 0;
1631 o->out = get_address(s, 0, get_field(s->fields, b4),
1632 get_field(s->fields, d4));
1635 return help_branch(s, &c, is_imm, imm, o->out);
1638 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1640 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1641 set_cc_static(s);
1642 return NO_EXIT;
1645 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1647 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1648 set_cc_static(s);
1649 return NO_EXIT;
1652 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1654 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1655 set_cc_static(s);
1656 return NO_EXIT;
1659 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1661 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1662 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1663 tcg_temp_free_i32(m3);
1664 gen_set_cc_nz_f32(s, o->in2);
1665 return NO_EXIT;
1668 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1670 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1671 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1672 tcg_temp_free_i32(m3);
1673 gen_set_cc_nz_f64(s, o->in2);
1674 return NO_EXIT;
1677 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1679 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1680 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1681 tcg_temp_free_i32(m3);
1682 gen_set_cc_nz_f128(s, o->in1, o->in2);
1683 return NO_EXIT;
1686 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1688 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1689 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1690 tcg_temp_free_i32(m3);
1691 gen_set_cc_nz_f32(s, o->in2);
1692 return NO_EXIT;
1695 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1697 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1698 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1699 tcg_temp_free_i32(m3);
1700 gen_set_cc_nz_f64(s, o->in2);
1701 return NO_EXIT;
1704 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1706 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1707 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1708 tcg_temp_free_i32(m3);
1709 gen_set_cc_nz_f128(s, o->in1, o->in2);
1710 return NO_EXIT;
1713 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1715 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1716 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1717 tcg_temp_free_i32(m3);
1718 gen_set_cc_nz_f32(s, o->in2);
1719 return NO_EXIT;
1722 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1724 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1725 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1726 tcg_temp_free_i32(m3);
1727 gen_set_cc_nz_f64(s, o->in2);
1728 return NO_EXIT;
1731 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1733 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1734 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1735 tcg_temp_free_i32(m3);
1736 gen_set_cc_nz_f128(s, o->in1, o->in2);
1737 return NO_EXIT;
1740 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1742 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1743 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1744 tcg_temp_free_i32(m3);
1745 gen_set_cc_nz_f32(s, o->in2);
1746 return NO_EXIT;
1749 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1751 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1752 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1753 tcg_temp_free_i32(m3);
1754 gen_set_cc_nz_f64(s, o->in2);
1755 return NO_EXIT;
1758 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1760 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1761 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1762 tcg_temp_free_i32(m3);
1763 gen_set_cc_nz_f128(s, o->in1, o->in2);
1764 return NO_EXIT;
1767 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1769 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1770 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1771 tcg_temp_free_i32(m3);
1772 return NO_EXIT;
1775 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1777 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1778 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1779 tcg_temp_free_i32(m3);
1780 return NO_EXIT;
1783 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1785 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1786 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1787 tcg_temp_free_i32(m3);
1788 return_low128(o->out2);
1789 return NO_EXIT;
1792 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1794 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1795 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1796 tcg_temp_free_i32(m3);
1797 return NO_EXIT;
1800 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1802 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1803 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1804 tcg_temp_free_i32(m3);
1805 return NO_EXIT;
1808 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1810 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1811 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1812 tcg_temp_free_i32(m3);
1813 return_low128(o->out2);
1814 return NO_EXIT;
1817 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1819 int r2 = get_field(s->fields, r2);
1820 TCGv_i64 len = tcg_temp_new_i64();
1822 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1823 set_cc_static(s);
1824 return_low128(o->out);
1826 tcg_gen_add_i64(regs[r2], regs[r2], len);
1827 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1828 tcg_temp_free_i64(len);
1830 return NO_EXIT;
1833 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1835 int l = get_field(s->fields, l1);
1836 TCGv_i32 vl;
1838 switch (l + 1) {
1839 case 1:
1840 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1841 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1842 break;
1843 case 2:
1844 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1845 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1846 break;
1847 case 4:
1848 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1849 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1850 break;
1851 case 8:
1852 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1853 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1854 break;
1855 default:
1856 vl = tcg_const_i32(l);
1857 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1858 tcg_temp_free_i32(vl);
1859 set_cc_static(s);
1860 return NO_EXIT;
1862 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1863 return NO_EXIT;
1866 static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
1868 int r1 = get_field(s->fields, r1);
1869 int r2 = get_field(s->fields, r2);
1870 TCGv_i32 t1, t2;
1872 /* r1 and r2 must be even. */
1873 if (r1 & 1 || r2 & 1) {
1874 gen_program_exception(s, PGM_SPECIFICATION);
1875 return EXIT_NORETURN;
1878 t1 = tcg_const_i32(r1);
1879 t2 = tcg_const_i32(r2);
1880 gen_helper_clcl(cc_op, cpu_env, t1, t2);
1881 tcg_temp_free_i32(t1);
1882 tcg_temp_free_i32(t2);
1883 set_cc_static(s);
1884 return NO_EXIT;
1887 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1889 int r1 = get_field(s->fields, r1);
1890 int r3 = get_field(s->fields, r3);
1891 TCGv_i32 t1, t3;
1893 /* r1 and r3 must be even. */
1894 if (r1 & 1 || r3 & 1) {
1895 gen_program_exception(s, PGM_SPECIFICATION);
1896 return EXIT_NORETURN;
1899 t1 = tcg_const_i32(r1);
1900 t3 = tcg_const_i32(r3);
1901 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1902 tcg_temp_free_i32(t1);
1903 tcg_temp_free_i32(t3);
1904 set_cc_static(s);
1905 return NO_EXIT;
1908 static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
1910 int r1 = get_field(s->fields, r1);
1911 int r3 = get_field(s->fields, r3);
1912 TCGv_i32 t1, t3;
1914 /* r1 and r3 must be even. */
1915 if (r1 & 1 || r3 & 1) {
1916 gen_program_exception(s, PGM_SPECIFICATION);
1917 return EXIT_NORETURN;
1920 t1 = tcg_const_i32(r1);
1921 t3 = tcg_const_i32(r3);
1922 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
1923 tcg_temp_free_i32(t1);
1924 tcg_temp_free_i32(t3);
1925 set_cc_static(s);
1926 return NO_EXIT;
1929 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1931 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1932 TCGv_i32 t1 = tcg_temp_new_i32();
1933 tcg_gen_extrl_i64_i32(t1, o->in1);
1934 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1935 set_cc_static(s);
1936 tcg_temp_free_i32(t1);
1937 tcg_temp_free_i32(m3);
1938 return NO_EXIT;
1941 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1943 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1944 set_cc_static(s);
1945 return_low128(o->in2);
1946 return NO_EXIT;
1949 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1951 TCGv_i64 t = tcg_temp_new_i64();
1952 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1953 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1954 tcg_gen_or_i64(o->out, o->out, t);
1955 tcg_temp_free_i64(t);
1956 return NO_EXIT;
1959 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1961 int d2 = get_field(s->fields, d2);
1962 int b2 = get_field(s->fields, b2);
1963 TCGv_i64 addr, cc;
1965 /* Note that in1 = R3 (new value) and
1966 in2 = (zero-extended) R1 (expected value). */
1968 addr = get_address(s, 0, b2, d2);
1969 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
1970 get_mem_index(s), s->insn->data | MO_ALIGN);
1971 tcg_temp_free_i64(addr);
1973 /* Are the memory and expected values (un)equal? Note that this setcond
1974 produces the output CC value, thus the NE sense of the test. */
1975 cc = tcg_temp_new_i64();
1976 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1977 tcg_gen_extrl_i64_i32(cc_op, cc);
1978 tcg_temp_free_i64(cc);
1979 set_cc_static(s);
1981 return NO_EXIT;
1984 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1986 int r1 = get_field(s->fields, r1);
1987 int r3 = get_field(s->fields, r3);
1988 int d2 = get_field(s->fields, d2);
1989 int b2 = get_field(s->fields, b2);
1990 TCGv_i64 addr;
1991 TCGv_i32 t_r1, t_r3;
1993 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1994 addr = get_address(s, 0, b2, d2);
1995 t_r1 = tcg_const_i32(r1);
1996 t_r3 = tcg_const_i32(r3);
1997 if (tb_cflags(s->tb) & CF_PARALLEL) {
1998 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
1999 } else {
2000 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2002 tcg_temp_free_i64(addr);
2003 tcg_temp_free_i32(t_r1);
2004 tcg_temp_free_i32(t_r3);
2006 set_cc_static(s);
2007 return NO_EXIT;
2010 static ExitStatus op_csst(DisasContext *s, DisasOps *o)
2012 int r3 = get_field(s->fields, r3);
2013 TCGv_i32 t_r3 = tcg_const_i32(r3);
2015 if (tb_cflags(s->tb) & CF_PARALLEL) {
2016 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
2017 } else {
2018 gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
2020 tcg_temp_free_i32(t_r3);
2022 set_cc_static(s);
2023 return NO_EXIT;
2026 #ifndef CONFIG_USER_ONLY
2027 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2029 TCGMemOp mop = s->insn->data;
2030 TCGv_i64 addr, old, cc;
2031 TCGLabel *lab = gen_new_label();
2033 /* Note that in1 = R1 (zero-extended expected value),
2034 out = R1 (original reg), out2 = R1+1 (new value). */
2036 check_privileged(s);
2037 addr = tcg_temp_new_i64();
2038 old = tcg_temp_new_i64();
2039 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2040 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2041 get_mem_index(s), mop | MO_ALIGN);
2042 tcg_temp_free_i64(addr);
2044 /* Are the memory and expected values (un)equal? */
2045 cc = tcg_temp_new_i64();
2046 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2047 tcg_gen_extrl_i64_i32(cc_op, cc);
2049 /* Write back the output now, so that it happens before the
2050 following branch, so that we don't need local temps. */
2051 if ((mop & MO_SIZE) == MO_32) {
2052 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2053 } else {
2054 tcg_gen_mov_i64(o->out, old);
2056 tcg_temp_free_i64(old);
2058 /* If the comparison was equal, and the LSB of R2 was set,
2059 then we need to flush the TLB (for all cpus). */
2060 tcg_gen_xori_i64(cc, cc, 1);
2061 tcg_gen_and_i64(cc, cc, o->in2);
2062 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2063 tcg_temp_free_i64(cc);
2065 gen_helper_purge(cpu_env);
2066 gen_set_label(lab);
2068 return NO_EXIT;
2070 #endif
2072 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2074 TCGv_i64 t1 = tcg_temp_new_i64();
2075 TCGv_i32 t2 = tcg_temp_new_i32();
2076 tcg_gen_extrl_i64_i32(t2, o->in1);
2077 gen_helper_cvd(t1, t2);
2078 tcg_temp_free_i32(t2);
2079 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2080 tcg_temp_free_i64(t1);
2081 return NO_EXIT;
2084 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2086 int m3 = get_field(s->fields, m3);
2087 TCGLabel *lab = gen_new_label();
2088 TCGCond c;
2090 c = tcg_invert_cond(ltgt_cond[m3]);
2091 if (s->insn->data) {
2092 c = tcg_unsigned_cond(c);
2094 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2096 /* Trap. */
2097 gen_trap(s);
2099 gen_set_label(lab);
2100 return NO_EXIT;
2103 static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
2105 int m3 = get_field(s->fields, m3);
2106 int r1 = get_field(s->fields, r1);
2107 int r2 = get_field(s->fields, r2);
2108 TCGv_i32 tr1, tr2, chk;
2110 /* R1 and R2 must both be even. */
2111 if ((r1 | r2) & 1) {
2112 gen_program_exception(s, PGM_SPECIFICATION);
2113 return EXIT_NORETURN;
2115 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2116 m3 = 0;
2119 tr1 = tcg_const_i32(r1);
2120 tr2 = tcg_const_i32(r2);
2121 chk = tcg_const_i32(m3);
2123 switch (s->insn->data) {
2124 case 12:
2125 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2126 break;
2127 case 14:
2128 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2129 break;
2130 case 21:
2131 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2132 break;
2133 case 24:
2134 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2135 break;
2136 case 41:
2137 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2138 break;
2139 case 42:
2140 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2141 break;
2142 default:
2143 g_assert_not_reached();
2146 tcg_temp_free_i32(tr1);
2147 tcg_temp_free_i32(tr2);
2148 tcg_temp_free_i32(chk);
2149 set_cc_static(s);
2150 return NO_EXIT;
2153 #ifndef CONFIG_USER_ONLY
2154 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2156 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2157 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2158 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2160 check_privileged(s);
2161 gen_helper_diag(cpu_env, r1, r3, func_code);
2163 tcg_temp_free_i32(func_code);
2164 tcg_temp_free_i32(r3);
2165 tcg_temp_free_i32(r1);
2166 return NO_EXIT;
2168 #endif
2170 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2172 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2173 return_low128(o->out);
2174 return NO_EXIT;
2177 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2179 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2180 return_low128(o->out);
2181 return NO_EXIT;
2184 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2186 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2187 return_low128(o->out);
2188 return NO_EXIT;
2191 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2193 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2194 return_low128(o->out);
2195 return NO_EXIT;
2198 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2200 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2201 return NO_EXIT;
2204 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2206 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2207 return NO_EXIT;
2210 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2212 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2213 return_low128(o->out2);
2214 return NO_EXIT;
2217 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2219 int r2 = get_field(s->fields, r2);
2220 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2221 return NO_EXIT;
2224 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2226 /* No cache information provided. */
2227 tcg_gen_movi_i64(o->out, -1);
2228 return NO_EXIT;
2231 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2233 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2234 return NO_EXIT;
2237 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2239 int r1 = get_field(s->fields, r1);
2240 int r2 = get_field(s->fields, r2);
2241 TCGv_i64 t = tcg_temp_new_i64();
2243 /* Note the "subsequently" in the PoO, which implies a defined result
2244 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2245 tcg_gen_shri_i64(t, psw_mask, 32);
2246 store_reg32_i64(r1, t);
2247 if (r2 != 0) {
2248 store_reg32_i64(r2, psw_mask);
2251 tcg_temp_free_i64(t);
2252 return NO_EXIT;
2255 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2257 int r1 = get_field(s->fields, r1);
2258 TCGv_i32 ilen;
2259 TCGv_i64 v1;
2261 /* Nested EXECUTE is not allowed. */
2262 if (unlikely(s->ex_value)) {
2263 gen_program_exception(s, PGM_EXECUTE);
2264 return EXIT_NORETURN;
2267 update_psw_addr(s);
2268 update_cc_op(s);
2270 if (r1 == 0) {
2271 v1 = tcg_const_i64(0);
2272 } else {
2273 v1 = regs[r1];
2276 ilen = tcg_const_i32(s->ilen);
2277 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2278 tcg_temp_free_i32(ilen);
2280 if (r1 == 0) {
2281 tcg_temp_free_i64(v1);
2284 return EXIT_PC_CC_UPDATED;
2287 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2289 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2290 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2291 tcg_temp_free_i32(m3);
2292 return NO_EXIT;
2295 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2297 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2298 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2299 tcg_temp_free_i32(m3);
2300 return NO_EXIT;
2303 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2305 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2306 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2307 return_low128(o->out2);
2308 tcg_temp_free_i32(m3);
2309 return NO_EXIT;
2312 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2314 /* We'll use the original input for cc computation, since we get to
2315 compare that against 0, which ought to be better than comparing
2316 the real output against 64. It also lets cc_dst be a convenient
2317 temporary during our computation. */
2318 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2320 /* R1 = IN ? CLZ(IN) : 64. */
2321 tcg_gen_clzi_i64(o->out, o->in2, 64);
2323 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2324 value by 64, which is undefined. But since the shift is 64 iff the
2325 input is zero, we still get the correct result after and'ing. */
2326 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2327 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2328 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2329 return NO_EXIT;
2332 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2334 int m3 = get_field(s->fields, m3);
2335 int pos, len, base = s->insn->data;
2336 TCGv_i64 tmp = tcg_temp_new_i64();
2337 uint64_t ccm;
2339 switch (m3) {
2340 case 0xf:
2341 /* Effectively a 32-bit load. */
2342 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2343 len = 32;
2344 goto one_insert;
2346 case 0xc:
2347 case 0x6:
2348 case 0x3:
2349 /* Effectively a 16-bit load. */
2350 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2351 len = 16;
2352 goto one_insert;
2354 case 0x8:
2355 case 0x4:
2356 case 0x2:
2357 case 0x1:
2358 /* Effectively an 8-bit load. */
2359 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2360 len = 8;
2361 goto one_insert;
2363 one_insert:
2364 pos = base + ctz32(m3) * 8;
2365 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2366 ccm = ((1ull << len) - 1) << pos;
2367 break;
2369 default:
2370 /* This is going to be a sequence of loads and inserts. */
2371 pos = base + 32 - 8;
2372 ccm = 0;
2373 while (m3) {
2374 if (m3 & 0x8) {
2375 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2376 tcg_gen_addi_i64(o->in2, o->in2, 1);
2377 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2378 ccm |= 0xff << pos;
2380 m3 = (m3 << 1) & 0xf;
2381 pos -= 8;
2383 break;
2386 tcg_gen_movi_i64(tmp, ccm);
2387 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2388 tcg_temp_free_i64(tmp);
2389 return NO_EXIT;
2392 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2394 int shift = s->insn->data & 0xff;
2395 int size = s->insn->data >> 8;
2396 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2397 return NO_EXIT;
2400 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2402 TCGv_i64 t1;
2404 gen_op_calc_cc(s);
2405 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2407 t1 = tcg_temp_new_i64();
2408 tcg_gen_shli_i64(t1, psw_mask, 20);
2409 tcg_gen_shri_i64(t1, t1, 36);
2410 tcg_gen_or_i64(o->out, o->out, t1);
2412 tcg_gen_extu_i32_i64(t1, cc_op);
2413 tcg_gen_shli_i64(t1, t1, 28);
2414 tcg_gen_or_i64(o->out, o->out, t1);
2415 tcg_temp_free_i64(t1);
2416 return NO_EXIT;
2419 #ifndef CONFIG_USER_ONLY
2420 static ExitStatus op_idte(DisasContext *s, DisasOps *o)
2422 TCGv_i32 m4;
2424 check_privileged(s);
2425 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2426 m4 = tcg_const_i32(get_field(s->fields, m4));
2427 } else {
2428 m4 = tcg_const_i32(0);
2430 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2431 tcg_temp_free_i32(m4);
2432 return NO_EXIT;
2435 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2437 TCGv_i32 m4;
2439 check_privileged(s);
2440 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2441 m4 = tcg_const_i32(get_field(s->fields, m4));
2442 } else {
2443 m4 = tcg_const_i32(0);
2445 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2446 tcg_temp_free_i32(m4);
2447 return NO_EXIT;
2450 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2452 check_privileged(s);
2453 gen_helper_iske(o->out, cpu_env, o->in2);
2454 return NO_EXIT;
2456 #endif
2458 static ExitStatus op_msa(DisasContext *s, DisasOps *o)
2460 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2461 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2462 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2463 TCGv_i32 t_r1, t_r2, t_r3, type;
2465 switch (s->insn->data) {
2466 case S390_FEAT_TYPE_KMCTR:
2467 if (r3 & 1 || !r3) {
2468 gen_program_exception(s, PGM_SPECIFICATION);
2469 return EXIT_NORETURN;
2471 /* FALL THROUGH */
2472 case S390_FEAT_TYPE_PPNO:
2473 case S390_FEAT_TYPE_KMF:
2474 case S390_FEAT_TYPE_KMC:
2475 case S390_FEAT_TYPE_KMO:
2476 case S390_FEAT_TYPE_KM:
2477 if (r1 & 1 || !r1) {
2478 gen_program_exception(s, PGM_SPECIFICATION);
2479 return EXIT_NORETURN;
2481 /* FALL THROUGH */
2482 case S390_FEAT_TYPE_KMAC:
2483 case S390_FEAT_TYPE_KIMD:
2484 case S390_FEAT_TYPE_KLMD:
2485 if (r2 & 1 || !r2) {
2486 gen_program_exception(s, PGM_SPECIFICATION);
2487 return EXIT_NORETURN;
2489 /* FALL THROUGH */
2490 case S390_FEAT_TYPE_PCKMO:
2491 case S390_FEAT_TYPE_PCC:
2492 break;
2493 default:
2494 g_assert_not_reached();
2497 t_r1 = tcg_const_i32(r1);
2498 t_r2 = tcg_const_i32(r2);
2499 t_r3 = tcg_const_i32(r3);
2500 type = tcg_const_i32(s->insn->data);
2501 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2502 set_cc_static(s);
2503 tcg_temp_free_i32(t_r1);
2504 tcg_temp_free_i32(t_r2);
2505 tcg_temp_free_i32(t_r3);
2506 tcg_temp_free_i32(type);
2507 return NO_EXIT;
2510 static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2512 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2513 set_cc_static(s);
2514 return NO_EXIT;
2517 static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
2519 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2520 set_cc_static(s);
2521 return NO_EXIT;
2524 static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
2526 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2527 set_cc_static(s);
2528 return NO_EXIT;
2531 static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2533 /* The real output is indeed the original value in memory;
2534 recompute the addition for the computation of CC. */
2535 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2536 s->insn->data | MO_ALIGN);
2537 /* However, we need to recompute the addition for setting CC. */
2538 tcg_gen_add_i64(o->out, o->in1, o->in2);
2539 return NO_EXIT;
2542 static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2544 /* The real output is indeed the original value in memory;
2545 recompute the addition for the computation of CC. */
2546 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2547 s->insn->data | MO_ALIGN);
2548 /* However, we need to recompute the operation for setting CC. */
2549 tcg_gen_and_i64(o->out, o->in1, o->in2);
2550 return NO_EXIT;
2553 static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2555 /* The real output is indeed the original value in memory;
2556 recompute the addition for the computation of CC. */
2557 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2558 s->insn->data | MO_ALIGN);
2559 /* However, we need to recompute the operation for setting CC. */
2560 tcg_gen_or_i64(o->out, o->in1, o->in2);
2561 return NO_EXIT;
2564 static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2566 /* The real output is indeed the original value in memory;
2567 recompute the addition for the computation of CC. */
2568 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2569 s->insn->data | MO_ALIGN);
2570 /* However, we need to recompute the operation for setting CC. */
2571 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2572 return NO_EXIT;
2575 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2577 gen_helper_ldeb(o->out, cpu_env, o->in2);
2578 return NO_EXIT;
2581 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2583 gen_helper_ledb(o->out, cpu_env, o->in2);
2584 return NO_EXIT;
2587 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2589 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2590 return NO_EXIT;
2593 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2595 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2596 return NO_EXIT;
2599 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2601 gen_helper_lxdb(o->out, cpu_env, o->in2);
2602 return_low128(o->out2);
2603 return NO_EXIT;
2606 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2608 gen_helper_lxeb(o->out, cpu_env, o->in2);
2609 return_low128(o->out2);
2610 return NO_EXIT;
2613 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2615 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2616 return NO_EXIT;
2619 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2621 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2622 return NO_EXIT;
2625 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2627 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2628 return NO_EXIT;
2631 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2633 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2634 return NO_EXIT;
2637 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2639 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2640 return NO_EXIT;
2643 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2645 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2646 return NO_EXIT;
2649 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2651 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2652 return NO_EXIT;
2655 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2657 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2658 return NO_EXIT;
2661 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2663 TCGLabel *lab = gen_new_label();
2664 store_reg32_i64(get_field(s->fields, r1), o->in2);
2665 /* The value is stored even in case of trap. */
2666 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2667 gen_trap(s);
2668 gen_set_label(lab);
2669 return NO_EXIT;
2672 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2674 TCGLabel *lab = gen_new_label();
2675 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2676 /* The value is stored even in case of trap. */
2677 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2678 gen_trap(s);
2679 gen_set_label(lab);
2680 return NO_EXIT;
2683 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2685 TCGLabel *lab = gen_new_label();
2686 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2687 /* The value is stored even in case of trap. */
2688 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2689 gen_trap(s);
2690 gen_set_label(lab);
2691 return NO_EXIT;
2694 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2696 TCGLabel *lab = gen_new_label();
2697 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2698 /* The value is stored even in case of trap. */
2699 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2700 gen_trap(s);
2701 gen_set_label(lab);
2702 return NO_EXIT;
2705 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2707 TCGLabel *lab = gen_new_label();
2708 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2709 /* The value is stored even in case of trap. */
2710 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2711 gen_trap(s);
2712 gen_set_label(lab);
2713 return NO_EXIT;
2716 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2718 DisasCompare c;
2720 disas_jcc(s, &c, get_field(s->fields, m3));
2722 if (c.is_64) {
2723 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2724 o->in2, o->in1);
2725 free_compare(&c);
2726 } else {
2727 TCGv_i32 t32 = tcg_temp_new_i32();
2728 TCGv_i64 t, z;
2730 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2731 free_compare(&c);
2733 t = tcg_temp_new_i64();
2734 tcg_gen_extu_i32_i64(t, t32);
2735 tcg_temp_free_i32(t32);
2737 z = tcg_const_i64(0);
2738 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2739 tcg_temp_free_i64(t);
2740 tcg_temp_free_i64(z);
2743 return NO_EXIT;
2746 #ifndef CONFIG_USER_ONLY
2747 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2749 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2750 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2751 check_privileged(s);
2752 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2753 tcg_temp_free_i32(r1);
2754 tcg_temp_free_i32(r3);
2755 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2756 return EXIT_PC_STALE_NOCHAIN;
2759 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2761 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2762 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2763 check_privileged(s);
2764 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2765 tcg_temp_free_i32(r1);
2766 tcg_temp_free_i32(r3);
2767 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2768 return EXIT_PC_STALE_NOCHAIN;
2771 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2773 check_privileged(s);
2774 gen_helper_lra(o->out, cpu_env, o->in2);
2775 set_cc_static(s);
2776 return NO_EXIT;
2779 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2781 check_privileged(s);
2783 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2784 return NO_EXIT;
2787 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2789 TCGv_i64 t1, t2;
2791 check_privileged(s);
2792 per_breaking_event(s);
2794 t1 = tcg_temp_new_i64();
2795 t2 = tcg_temp_new_i64();
2796 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2797 tcg_gen_addi_i64(o->in2, o->in2, 4);
2798 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2799 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2800 tcg_gen_shli_i64(t1, t1, 32);
2801 gen_helper_load_psw(cpu_env, t1, t2);
2802 tcg_temp_free_i64(t1);
2803 tcg_temp_free_i64(t2);
2804 return EXIT_NORETURN;
2807 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2809 TCGv_i64 t1, t2;
2811 check_privileged(s);
2812 per_breaking_event(s);
2814 t1 = tcg_temp_new_i64();
2815 t2 = tcg_temp_new_i64();
2816 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2817 tcg_gen_addi_i64(o->in2, o->in2, 8);
2818 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2819 gen_helper_load_psw(cpu_env, t1, t2);
2820 tcg_temp_free_i64(t1);
2821 tcg_temp_free_i64(t2);
2822 return EXIT_NORETURN;
2824 #endif
2826 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2828 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2829 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2830 gen_helper_lam(cpu_env, r1, o->in2, r3);
2831 tcg_temp_free_i32(r1);
2832 tcg_temp_free_i32(r3);
2833 return NO_EXIT;
2836 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2838 int r1 = get_field(s->fields, r1);
2839 int r3 = get_field(s->fields, r3);
2840 TCGv_i64 t1, t2;
2842 /* Only one register to read. */
2843 t1 = tcg_temp_new_i64();
2844 if (unlikely(r1 == r3)) {
2845 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2846 store_reg32_i64(r1, t1);
2847 tcg_temp_free(t1);
2848 return NO_EXIT;
2851 /* First load the values of the first and last registers to trigger
2852 possible page faults. */
2853 t2 = tcg_temp_new_i64();
2854 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2855 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2856 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2857 store_reg32_i64(r1, t1);
2858 store_reg32_i64(r3, t2);
2860 /* Only two registers to read. */
2861 if (((r1 + 1) & 15) == r3) {
2862 tcg_temp_free(t2);
2863 tcg_temp_free(t1);
2864 return NO_EXIT;
2867 /* Then load the remaining registers. Page fault can't occur. */
2868 r3 = (r3 - 1) & 15;
2869 tcg_gen_movi_i64(t2, 4);
2870 while (r1 != r3) {
2871 r1 = (r1 + 1) & 15;
2872 tcg_gen_add_i64(o->in2, o->in2, t2);
2873 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2874 store_reg32_i64(r1, t1);
2876 tcg_temp_free(t2);
2877 tcg_temp_free(t1);
2879 return NO_EXIT;
2882 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2884 int r1 = get_field(s->fields, r1);
2885 int r3 = get_field(s->fields, r3);
2886 TCGv_i64 t1, t2;
2888 /* Only one register to read. */
2889 t1 = tcg_temp_new_i64();
2890 if (unlikely(r1 == r3)) {
2891 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2892 store_reg32h_i64(r1, t1);
2893 tcg_temp_free(t1);
2894 return NO_EXIT;
2897 /* First load the values of the first and last registers to trigger
2898 possible page faults. */
2899 t2 = tcg_temp_new_i64();
2900 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2901 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2902 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2903 store_reg32h_i64(r1, t1);
2904 store_reg32h_i64(r3, t2);
2906 /* Only two registers to read. */
2907 if (((r1 + 1) & 15) == r3) {
2908 tcg_temp_free(t2);
2909 tcg_temp_free(t1);
2910 return NO_EXIT;
2913 /* Then load the remaining registers. Page fault can't occur. */
2914 r3 = (r3 - 1) & 15;
2915 tcg_gen_movi_i64(t2, 4);
2916 while (r1 != r3) {
2917 r1 = (r1 + 1) & 15;
2918 tcg_gen_add_i64(o->in2, o->in2, t2);
2919 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2920 store_reg32h_i64(r1, t1);
2922 tcg_temp_free(t2);
2923 tcg_temp_free(t1);
2925 return NO_EXIT;
2928 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2930 int r1 = get_field(s->fields, r1);
2931 int r3 = get_field(s->fields, r3);
2932 TCGv_i64 t1, t2;
2934 /* Only one register to read. */
2935 if (unlikely(r1 == r3)) {
2936 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2937 return NO_EXIT;
2940 /* First load the values of the first and last registers to trigger
2941 possible page faults. */
2942 t1 = tcg_temp_new_i64();
2943 t2 = tcg_temp_new_i64();
2944 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2945 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2946 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2947 tcg_gen_mov_i64(regs[r1], t1);
2948 tcg_temp_free(t2);
2950 /* Only two registers to read. */
2951 if (((r1 + 1) & 15) == r3) {
2952 tcg_temp_free(t1);
2953 return NO_EXIT;
2956 /* Then load the remaining registers. Page fault can't occur. */
2957 r3 = (r3 - 1) & 15;
2958 tcg_gen_movi_i64(t1, 8);
2959 while (r1 != r3) {
2960 r1 = (r1 + 1) & 15;
2961 tcg_gen_add_i64(o->in2, o->in2, t1);
2962 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2964 tcg_temp_free(t1);
2966 return NO_EXIT;
2969 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2971 TCGv_i64 a1, a2;
2972 TCGMemOp mop = s->insn->data;
2974 /* In a parallel context, stop the world and single step. */
2975 if (tb_cflags(s->tb) & CF_PARALLEL) {
2976 update_psw_addr(s);
2977 update_cc_op(s);
2978 gen_exception(EXCP_ATOMIC);
2979 return EXIT_NORETURN;
2982 /* In a serial context, perform the two loads ... */
2983 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2984 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2985 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2986 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2987 tcg_temp_free_i64(a1);
2988 tcg_temp_free_i64(a2);
2990 /* ... and indicate that we performed them while interlocked. */
2991 gen_op_movi_cc(s, 0);
2992 return NO_EXIT;
2995 static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
2997 if (tb_cflags(s->tb) & CF_PARALLEL) {
2998 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
2999 } else {
3000 gen_helper_lpq(o->out, cpu_env, o->in2);
3002 return_low128(o->out2);
3003 return NO_EXIT;
3006 #ifndef CONFIG_USER_ONLY
3007 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
3009 check_privileged(s);
3010 gen_helper_lura(o->out, cpu_env, o->in2);
3011 return NO_EXIT;
3014 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
3016 check_privileged(s);
3017 gen_helper_lurag(o->out, cpu_env, o->in2);
3018 return NO_EXIT;
3020 #endif
3022 static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
3024 tcg_gen_andi_i64(o->out, o->in2, -256);
3025 return NO_EXIT;
3028 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
3030 o->out = o->in2;
3031 o->g_out = o->g_in2;
3032 o->in2 = NULL;
3033 o->g_in2 = false;
3034 return NO_EXIT;
3037 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
3039 int b2 = get_field(s->fields, b2);
3040 TCGv ar1 = tcg_temp_new_i64();
3042 o->out = o->in2;
3043 o->g_out = o->g_in2;
3044 o->in2 = NULL;
3045 o->g_in2 = false;
3047 switch (s->tb->flags & FLAG_MASK_ASC) {
3048 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3049 tcg_gen_movi_i64(ar1, 0);
3050 break;
3051 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3052 tcg_gen_movi_i64(ar1, 1);
3053 break;
3054 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3055 if (b2) {
3056 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3057 } else {
3058 tcg_gen_movi_i64(ar1, 0);
3060 break;
3061 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3062 tcg_gen_movi_i64(ar1, 2);
3063 break;
3066 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3067 tcg_temp_free_i64(ar1);
3069 return NO_EXIT;
3072 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3074 o->out = o->in1;
3075 o->out2 = o->in2;
3076 o->g_out = o->g_in1;
3077 o->g_out2 = o->g_in2;
3078 o->in1 = NULL;
3079 o->in2 = NULL;
3080 o->g_in1 = o->g_in2 = false;
3081 return NO_EXIT;
3084 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3086 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3087 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3088 tcg_temp_free_i32(l);
3089 return NO_EXIT;
3092 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3094 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3095 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3096 tcg_temp_free_i32(l);
3097 return NO_EXIT;
3100 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3102 int r1 = get_field(s->fields, r1);
3103 int r2 = get_field(s->fields, r2);
3104 TCGv_i32 t1, t2;
3106 /* r1 and r2 must be even. */
3107 if (r1 & 1 || r2 & 1) {
3108 gen_program_exception(s, PGM_SPECIFICATION);
3109 return EXIT_NORETURN;
3112 t1 = tcg_const_i32(r1);
3113 t2 = tcg_const_i32(r2);
3114 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3115 tcg_temp_free_i32(t1);
3116 tcg_temp_free_i32(t2);
3117 set_cc_static(s);
3118 return NO_EXIT;
3121 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3123 int r1 = get_field(s->fields, r1);
3124 int r3 = get_field(s->fields, r3);
3125 TCGv_i32 t1, t3;
3127 /* r1 and r3 must be even. */
3128 if (r1 & 1 || r3 & 1) {
3129 gen_program_exception(s, PGM_SPECIFICATION);
3130 return EXIT_NORETURN;
3133 t1 = tcg_const_i32(r1);
3134 t3 = tcg_const_i32(r3);
3135 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3136 tcg_temp_free_i32(t1);
3137 tcg_temp_free_i32(t3);
3138 set_cc_static(s);
3139 return NO_EXIT;
3142 static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3144 int r1 = get_field(s->fields, r1);
3145 int r3 = get_field(s->fields, r3);
3146 TCGv_i32 t1, t3;
3148 /* r1 and r3 must be even. */
3149 if (r1 & 1 || r3 & 1) {
3150 gen_program_exception(s, PGM_SPECIFICATION);
3151 return EXIT_NORETURN;
3154 t1 = tcg_const_i32(r1);
3155 t3 = tcg_const_i32(r3);
3156 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3157 tcg_temp_free_i32(t1);
3158 tcg_temp_free_i32(t3);
3159 set_cc_static(s);
3160 return NO_EXIT;
3163 static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
3165 int r3 = get_field(s->fields, r3);
3166 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3167 set_cc_static(s);
3168 return NO_EXIT;
3171 #ifndef CONFIG_USER_ONLY
3172 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3174 int r1 = get_field(s->fields, l1);
3175 check_privileged(s);
3176 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3177 set_cc_static(s);
3178 return NO_EXIT;
3181 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3183 int r1 = get_field(s->fields, l1);
3184 check_privileged(s);
3185 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3186 set_cc_static(s);
3187 return NO_EXIT;
3189 #endif
3191 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3193 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3194 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3195 tcg_temp_free_i32(l);
3196 return NO_EXIT;
3199 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3201 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3202 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3203 tcg_temp_free_i32(l);
3204 return NO_EXIT;
3207 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3209 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3210 set_cc_static(s);
3211 return NO_EXIT;
3214 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3216 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3217 set_cc_static(s);
3218 return_low128(o->in2);
3219 return NO_EXIT;
3222 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3224 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3225 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3226 tcg_temp_free_i32(l);
3227 return NO_EXIT;
3230 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3232 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3233 return NO_EXIT;
3236 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3238 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3239 return NO_EXIT;
3242 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3244 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3245 return NO_EXIT;
3248 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3250 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3251 return NO_EXIT;
3254 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3256 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3257 return NO_EXIT;
3260 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3262 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3263 return_low128(o->out2);
3264 return NO_EXIT;
3267 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3269 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3270 return_low128(o->out2);
3271 return NO_EXIT;
3274 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3276 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3277 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3278 tcg_temp_free_i64(r3);
3279 return NO_EXIT;
3282 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3284 int r3 = get_field(s->fields, r3);
3285 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3286 return NO_EXIT;
3289 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3291 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3292 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3293 tcg_temp_free_i64(r3);
3294 return NO_EXIT;
3297 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3299 int r3 = get_field(s->fields, r3);
3300 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3301 return NO_EXIT;
3304 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3306 TCGv_i64 z, n;
3307 z = tcg_const_i64(0);
3308 n = tcg_temp_new_i64();
3309 tcg_gen_neg_i64(n, o->in2);
3310 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3311 tcg_temp_free_i64(n);
3312 tcg_temp_free_i64(z);
3313 return NO_EXIT;
3316 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3318 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3319 return NO_EXIT;
3322 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3324 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3325 return NO_EXIT;
3328 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3330 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3331 tcg_gen_mov_i64(o->out2, o->in2);
3332 return NO_EXIT;
3335 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3337 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3338 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3339 tcg_temp_free_i32(l);
3340 set_cc_static(s);
3341 return NO_EXIT;
3344 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3346 tcg_gen_neg_i64(o->out, o->in2);
3347 return NO_EXIT;
3350 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3352 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3353 return NO_EXIT;
3356 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3358 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3359 return NO_EXIT;
3362 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3364 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3365 tcg_gen_mov_i64(o->out2, o->in2);
3366 return NO_EXIT;
3369 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3371 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3372 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3373 tcg_temp_free_i32(l);
3374 set_cc_static(s);
3375 return NO_EXIT;
3378 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3380 tcg_gen_or_i64(o->out, o->in1, o->in2);
3381 return NO_EXIT;
3384 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3386 int shift = s->insn->data & 0xff;
3387 int size = s->insn->data >> 8;
3388 uint64_t mask = ((1ull << size) - 1) << shift;
3390 assert(!o->g_in2);
3391 tcg_gen_shli_i64(o->in2, o->in2, shift);
3392 tcg_gen_or_i64(o->out, o->in1, o->in2);
3394 /* Produce the CC from only the bits manipulated. */
3395 tcg_gen_andi_i64(cc_dst, o->out, mask);
3396 set_cc_nz_u64(s, cc_dst);
3397 return NO_EXIT;
3400 static ExitStatus op_oi(DisasContext *s, DisasOps *o)
3402 o->in1 = tcg_temp_new_i64();
3404 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3405 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3406 } else {
3407 /* Perform the atomic operation in memory. */
3408 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3409 s->insn->data);
3412 /* Recompute also for atomic case: needed for setting CC. */
3413 tcg_gen_or_i64(o->out, o->in1, o->in2);
3415 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3416 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3418 return NO_EXIT;
3421 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3423 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3424 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3425 tcg_temp_free_i32(l);
3426 return NO_EXIT;
3429 static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3431 int l2 = get_field(s->fields, l2) + 1;
3432 TCGv_i32 l;
3434 /* The length must not exceed 32 bytes. */
3435 if (l2 > 32) {
3436 gen_program_exception(s, PGM_SPECIFICATION);
3437 return EXIT_NORETURN;
3439 l = tcg_const_i32(l2);
3440 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3441 tcg_temp_free_i32(l);
3442 return NO_EXIT;
3445 static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3447 int l2 = get_field(s->fields, l2) + 1;
3448 TCGv_i32 l;
3450 /* The length must be even and should not exceed 64 bytes. */
3451 if ((l2 & 1) || (l2 > 64)) {
3452 gen_program_exception(s, PGM_SPECIFICATION);
3453 return EXIT_NORETURN;
3455 l = tcg_const_i32(l2);
3456 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3457 tcg_temp_free_i32(l);
3458 return NO_EXIT;
3461 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3463 gen_helper_popcnt(o->out, o->in2);
3464 return NO_EXIT;
3467 #ifndef CONFIG_USER_ONLY
3468 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3470 check_privileged(s);
3471 gen_helper_ptlb(cpu_env);
3472 return NO_EXIT;
3474 #endif
3476 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3478 int i3 = get_field(s->fields, i3);
3479 int i4 = get_field(s->fields, i4);
3480 int i5 = get_field(s->fields, i5);
3481 int do_zero = i4 & 0x80;
3482 uint64_t mask, imask, pmask;
3483 int pos, len, rot;
3485 /* Adjust the arguments for the specific insn. */
3486 switch (s->fields->op2) {
3487 case 0x55: /* risbg */
3488 case 0x59: /* risbgn */
3489 i3 &= 63;
3490 i4 &= 63;
3491 pmask = ~0;
3492 break;
3493 case 0x5d: /* risbhg */
3494 i3 &= 31;
3495 i4 &= 31;
3496 pmask = 0xffffffff00000000ull;
3497 break;
3498 case 0x51: /* risblg */
3499 i3 &= 31;
3500 i4 &= 31;
3501 pmask = 0x00000000ffffffffull;
3502 break;
3503 default:
3504 g_assert_not_reached();
3507 /* MASK is the set of bits to be inserted from R2.
3508 Take care for I3/I4 wraparound. */
3509 mask = pmask >> i3;
3510 if (i3 <= i4) {
3511 mask ^= pmask >> i4 >> 1;
3512 } else {
3513 mask |= ~(pmask >> i4 >> 1);
3515 mask &= pmask;
3517 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3518 insns, we need to keep the other half of the register. */
3519 imask = ~mask | ~pmask;
3520 if (do_zero) {
3521 imask = ~pmask;
3524 len = i4 - i3 + 1;
3525 pos = 63 - i4;
3526 rot = i5 & 63;
3527 if (s->fields->op2 == 0x5d) {
3528 pos += 32;
3531 /* In some cases we can implement this with extract. */
3532 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3533 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3534 return NO_EXIT;
3537 /* In some cases we can implement this with deposit. */
3538 if (len > 0 && (imask == 0 || ~mask == imask)) {
3539 /* Note that we rotate the bits to be inserted to the lsb, not to
3540 the position as described in the PoO. */
3541 rot = (rot - pos) & 63;
3542 } else {
3543 pos = -1;
3546 /* Rotate the input as necessary. */
3547 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3549 /* Insert the selected bits into the output. */
3550 if (pos >= 0) {
3551 if (imask == 0) {
3552 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3553 } else {
3554 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3556 } else if (imask == 0) {
3557 tcg_gen_andi_i64(o->out, o->in2, mask);
3558 } else {
3559 tcg_gen_andi_i64(o->in2, o->in2, mask);
3560 tcg_gen_andi_i64(o->out, o->out, imask);
3561 tcg_gen_or_i64(o->out, o->out, o->in2);
3563 return NO_EXIT;
3566 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3568 int i3 = get_field(s->fields, i3);
3569 int i4 = get_field(s->fields, i4);
3570 int i5 = get_field(s->fields, i5);
3571 uint64_t mask;
3573 /* If this is a test-only form, arrange to discard the result. */
3574 if (i3 & 0x80) {
3575 o->out = tcg_temp_new_i64();
3576 o->g_out = false;
3579 i3 &= 63;
3580 i4 &= 63;
3581 i5 &= 63;
3583 /* MASK is the set of bits to be operated on from R2.
3584 Take care for I3/I4 wraparound. */
3585 mask = ~0ull >> i3;
3586 if (i3 <= i4) {
3587 mask ^= ~0ull >> i4 >> 1;
3588 } else {
3589 mask |= ~(~0ull >> i4 >> 1);
3592 /* Rotate the input as necessary. */
3593 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3595 /* Operate. */
3596 switch (s->fields->op2) {
3597 case 0x55: /* AND */
3598 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3599 tcg_gen_and_i64(o->out, o->out, o->in2);
3600 break;
3601 case 0x56: /* OR */
3602 tcg_gen_andi_i64(o->in2, o->in2, mask);
3603 tcg_gen_or_i64(o->out, o->out, o->in2);
3604 break;
3605 case 0x57: /* XOR */
3606 tcg_gen_andi_i64(o->in2, o->in2, mask);
3607 tcg_gen_xor_i64(o->out, o->out, o->in2);
3608 break;
3609 default:
3610 abort();
3613 /* Set the CC. */
3614 tcg_gen_andi_i64(cc_dst, o->out, mask);
3615 set_cc_nz_u64(s, cc_dst);
3616 return NO_EXIT;
3619 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3621 tcg_gen_bswap16_i64(o->out, o->in2);
3622 return NO_EXIT;
3625 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3627 tcg_gen_bswap32_i64(o->out, o->in2);
3628 return NO_EXIT;
3631 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3633 tcg_gen_bswap64_i64(o->out, o->in2);
3634 return NO_EXIT;
3637 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3639 TCGv_i32 t1 = tcg_temp_new_i32();
3640 TCGv_i32 t2 = tcg_temp_new_i32();
3641 TCGv_i32 to = tcg_temp_new_i32();
3642 tcg_gen_extrl_i64_i32(t1, o->in1);
3643 tcg_gen_extrl_i64_i32(t2, o->in2);
3644 tcg_gen_rotl_i32(to, t1, t2);
3645 tcg_gen_extu_i32_i64(o->out, to);
3646 tcg_temp_free_i32(t1);
3647 tcg_temp_free_i32(t2);
3648 tcg_temp_free_i32(to);
3649 return NO_EXIT;
3652 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3654 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3655 return NO_EXIT;
3658 #ifndef CONFIG_USER_ONLY
3659 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3661 check_privileged(s);
3662 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3663 set_cc_static(s);
3664 return NO_EXIT;
3667 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3669 check_privileged(s);
3670 gen_helper_sacf(cpu_env, o->in2);
3671 /* Addressing mode has changed, so end the block. */
3672 return EXIT_PC_STALE;
3674 #endif
/* SET ADDRESSING MODE.  insn->data selects the target mode
   (0 = 24-bit, 1 = 31-bit, otherwise 64-bit) and hence the PC mask.  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    /* Record the new mode in PSW bits 31-32.  */
    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return EXIT_PC_STALE;
}
/* SET ACCESS: store the low 32 bits of operand 2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
/* BFP subtract, short (32-bit) format.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP subtract, long (64-bit) format.  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP subtract, extended (128-bit) format; low half returned via
   the helper's 128-bit return convention.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP square root, short format.  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* BFP square root, long format.  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* BFP square root, extended format.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper sets the CC.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged inter-CPU signalling; helper sets the CC.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
#endif
/* STORE ON CONDITION (STOC/STOCG/STOCFH): conditionally store r1 at the
   second-operand address.  insn->data selects the width/variant.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register.  */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic): shift while preserving the sign bit;
   insn->data is the sign-bit position (31 or 63) and selects the CC op.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* Logical shift left.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Arithmetic shift right.  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Logical shift right.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: load the floating point control register from operand 2.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: like SFPC but may raise simulated IEEE exceptions
   (handled entirely in the helper).  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
/* SRNM/SRNMB/SRNMT: set the BFP (or DFP for SRNMT) rounding-mode field
   of the FPC.  The opcode selects which FPC bit field is written.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        /* No base register: the rounding mode is a compile-time constant.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
/* SET PROGRAM MASK: bits 2-3 of in1's high byte become the CC,
   bits 4-7 become the PSW program mask.  */
static ExitStatus op_spm(DisasContext *s, DisasOps *o)
{
    /* Extract the CC from bits 28-29 of the register value.  */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Deposit the program mask into the PSW.  */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return NO_EXIT;
}
/* EXTRACT CPU TIME: GR0 <- first operand minus CPU timer, GR1 <- second
   operand, r3 <- doubleword at the r3 address.  */
static ExitStatus op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits of in2 become the PSW access key.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged; storage key handled in helper.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: replace PSW bits 0-7 from operand 2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}

/* STORE CPU ADDRESS: read the core id from the CPU state.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return NO_EXIT;
}
/* STORE CLOCK: TOD clock value from the helper; CC always 0.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: 16-byte TOD value including the programmable
   field, stored at the second-operand address.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR: privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET CLOCK PROGRAMMABLE FIELD: privileged; takes GR0 implicitly.  */
static ExitStatus op_sckpf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckpf(cpu_env, regs[0]);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): privileged; stores control regs r1..r3.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit): privileged; stores control regs r1..r3.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CPU ID: privileged; aligned 8-byte store of env->cpuid.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    return NO_EXIT;
}

/* SET CPU TIMER: privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST: privileged; writes the facility bits
   (destination is fixed by the architecture, handled in the helper).  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE SYSTEM INFORMATION: privileged; GR0/GR1 select the SYSIB.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* Channel-subsystem I/O instructions.  All are privileged; most take the
   subchannel id implicitly in GR1 and set the CC via the helper.  */

/* CANCEL SUBCHANNEL.  */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL.  */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL.  */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL: SCHIB at the second-operand address.  */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH.  */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL.  */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS LIMIT.  */
static ExitStatus op_sal(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sal(cpu_env, regs[1]);
    return NO_EXIT;
}

/* SET CHANNEL MONITOR: GR1/GR2 plus the second operand.  */
static ExitStatus op_schm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return NO_EXIT;
}

/* SIGNAL ADAPTER: not provided by this implementation.  */
static ExitStatus op_siga(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}

/* STORE CHANNEL PATH STATUS: not provided; suppressed as a no-op.  */
static ExitStatus op_stcps(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* The instruction is suppressed if not provided.  */
    return NO_EXIT;
}

/* START SUBCHANNEL: ORB at the second-operand address.  */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL: SCHIB stored at the second-operand address.  */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE CHANNEL REPORT WORD.  */
static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST PENDING INTERRUPTION: stores the interruption code at addr1.  */
static ExitStatus op_tpi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL: IRB stored at the second-operand address.  */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL: command block at the second-operand address.  */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX: privileged; the prefix register masked to its
   architected bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STNSM/STOSM (store then AND/OR system mask): store the current system
   mask byte, then AND (opcode 0xac) or OR the immediate into it.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}
/* STORE USING REAL ADDRESS (32-bit): privileged.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit): privileged.  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
#endif
/* STORE FACILITY LIST EXTENDED: not privileged; helper sets the CC.  */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Plain stores of 1, 2, 4 and 8 bytes of operand 1 at the
   second-operand address.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: store access registers r1..r3 via the helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK: store the bytes of operand 1 selected by
   the m3 mask.  Contiguous masks become one wide store; sparse masks
   become a byte-at-a-time sequence.  insn->data is the bit offset of the
   source word within the 64-bit register (for STCMH).  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
4365 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4367 int r1 = get_field(s->fields, r1);
4368 int r3 = get_field(s->fields, r3);
4369 int size = s->insn->data;
4370 TCGv_i64 tsize = tcg_const_i64(size);
4372 while (1) {
4373 if (size == 8) {
4374 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4375 } else {
4376 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4378 if (r1 == r3) {
4379 break;
4381 tcg_gen_add_i64(o->in2, o->in2, tsize);
4382 r1 = (r1 + 1) & 15;
4385 tcg_temp_free_i64(tsize);
4386 return NO_EXIT;
/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
   (wrapping modulo 16) at consecutive word locations.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* NOTE(review): the high half is moved into store position with a
           left shift; tcg_gen_qemu_st32 stores the low 32 bits — confirm
           this shift direction against the st32 convention used here.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
/* STORE PAIR TO QUADWORD: atomic variant when translating for a
   parallel context.  */
static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    }
    return NO_EXIT;
}
/* SEARCH STRING: helper scans memory and sets the CC.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SEARCH STRING UNICODE: as SRST but for 2-byte characters.  */
static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Integer subtract: out = in1 - in2.  CC is produced by the cout hook.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   recovered from the previous CC.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the SVC code and insn length for the
   interrupt handler, then raise the exception.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Commit the PSW address and CC before leaving translated code.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
4500 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4502 int cc = 0;
4504 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4505 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4506 gen_op_movi_cc(s, cc);
4507 return NO_EXIT;
/* TEST DATA CLASS, short format: helper sets the CC.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS, long format.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS, extended format (128-bit value in out/out2).  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY

/* TEST BLOCK: privileged; clears/tests a page via the helper.  */
static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST PROTECTION: note, unlike its neighbours, no privilege check here
   (TPROT's access checking is done inside the helper).  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

#endif
/* TEST DECIMAL: check packed-decimal validity of the l1+1 byte field.  */
static ExitStatus op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE: translate l1+1 bytes at addr1 through the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED: 128-bit register pair result via helper.  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST: forward scan.  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST REVERSE: backward scan.  */
static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* TROO/TROT/TRTO/TRTT (shared implementation): the low opcode bits
   select source/destination character sizes; m3 bit 0 (with the ETF2
   enhancement) disables the test-character comparison.  */
static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        /* Without the facility the m3 field is ignored.  */
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 is an impossible character value: never matches.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        /* Test character comes from GR0, sized per the opcode.  */
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST AND SET: atomically exchange the byte with 0xff; the CC is the
   former leftmost bit of that byte.  */
static ExitStatus op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return NO_EXIT;
}
/* UNPACK: packed decimal to zoned decimal via helper.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* UNPACK ASCII: length checked against the architected maximum.  */
static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK UNICODE: length must be even and within the maximum.  */
static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR (character): memory XOR memory.  When both operands name
   the same location (a classic zeroing idiom) emit inline stores of
   zero; otherwise defer to the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        /* Emit the widest stores first, narrowing as the residue shrinks;
           the address is only advanced while bytes remain.  */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* Bitwise XOR of the two inputs.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR immediate into one sub-field of the register; insn->data packs
   (size << 8) | shift for the targeted field.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* XI (exclusive or immediate into storage): use an atomic fetch-xor
   when the interlocked-access facility is present, otherwise a plain
   load/xor/store sequence.  insn->data is the MemOp for the access.  */
static ExitStatus op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}
/* Produce a constant zero output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce zero in both output halves; out2 aliases out, so mark it
   global-like to prevent a double free.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* zPCI instructions.  All privileged; the heavy lifting lives in the
   helpers, which also produce the CC.  */

/* CLP (call logical processor): request block addressed via r2.  */
static ExitStatus op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* PCILG (PCI load).  */
static ExitStatus op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* PCISTG (PCI store).  */
static ExitStatus op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STPCIFC (store PCI function controls): FIB stored at addr1.  */
static ExitStatus op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIC (set interruption controls).  */
static ExitStatus op_sic(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* RPCIT (refresh PCI translations).  */
static ExitStatus op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    check_privileged(s);
    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* PCISTB (PCI store block): data block at addr1.  */
static ExitStatus op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* MPCIFC (modify PCI function controls): FIB at addr1.  */
static ExitStatus op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    check_privileged(s);
    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  Each simply records the
   relevant operands together with a CC_OP_* discriminator for lazy
   evaluation later.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed/unsigned add, and add-with-carry, 32/64-bit.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed/unsigned comparisons.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Floating-point result classification (nonzero / sign).  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Nonzero tests; the 32-bit variant masks off the high half first.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Sign tests against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Signed subtract.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}
5014 static void cout_subs64(DisasContext *s, DisasOps *o)
5016 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5019 static void cout_subu32(DisasContext *s, DisasOps *o)
5021 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5024 static void cout_subu64(DisasContext *s, DisasOps *o)
5026 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5029 static void cout_subb32(DisasContext *s, DisasOps *o)
5031 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5034 static void cout_subb64(DisasContext *s, DisasOps *o)
5036 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5039 static void cout_tm32(DisasContext *s, DisasOps *o)
5041 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5044 static void cout_tm64(DisasContext *s, DisasOps *o)
5046 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5049 /* ====================================================================== */
5050 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5051 with the TCG register to which we will write. Used in combination with
5052 the "wout" generators, in some cases we need a new temporary, and in
5053 some cases we can write to a TCG global. */
/* Each prep helper installs the destination TCG value(s) into o->out
   (and o->out2).  Helpers that point at a TCG global set g_out/g_out2
   so translate_one does not free them as temporaries.  */

static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Write directly into the r1 general-register global.  */
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Even/odd general-register pair destination; r1 must be even.  */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 128-bit float destination: the (r1, r1 + 2) FP register pair.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
5100 /* ====================================================================== */
5101 /* The "Write OUTput" generators. These generally perform some non-trivial
5102 copy of data to TCG globals, or to main memory. The trivial cases are
5103 generally handled by having a "prep" generator install the TCG global
5104 as the destination of the operation. */
/* Each wout helper stores the computed result from o->out (and o->out2)
   into its final architectural destination: a register, a register
   sub-field, or memory.  */

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Replace only the low 8 bits of r1.  */
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Replace only the low 16 bits of r1.  */
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store into the high 32 bits of r1.  */
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store out/out2 into the 32-bit halves of the even/odd pair.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Split a 64-bit result across the even/odd pair: low half to r1+1,
       high half to r1.  Note this clobbers o->out with the shift.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Short (32-bit) float result.  */
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
5183 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5185 int f1 = get_field(s->fields, r1);
5186 store_freg(f1, o->out);
5187 store_freg(f1 + 2, o->out2);
5189 #define SPEC_wout_x1 SPEC_r1_f128
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Only store when r1 and r2 are distinct registers.  */
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Only store when r1 and r2 are distinct FP registers.  */
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Memory stores of the result through the first-operand address.  */

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store through the second-operand address (held in o->in2).  */
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Copy the (possibly op-modified) second input into r1.  */
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5249 /* ====================================================================== */
5250 /* The "INput 1" generators. These load the first operand to an insn. */
/* Each in1 helper loads the first operand into o->in1.  "_o" variants
   alias a TCG global directly (and set g_in1 so it is not freed);
   others create a fresh temporary.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r1, sign-extended.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r1, zero-extended.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* High 32 bits of r1, shifted down.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* The odd register of the even/odd r1 pair.  */
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Build a 64-bit value from the r1 even/odd pair: r1 is the high
       half, r1+1 the low half.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* NOTE(review): despite the in1_ name this installs the f128 pair
       into out/out2 -- presumably for insns that read and modify operand
       1 in place; confirm against the insn-data.def users.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Compute the first-operand effective address from b1 + d1.  */
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Second-operand address (x2 + b2 + d2) placed in addr1.  */
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* in1_m1_* load the first operand from memory at the la1 address,
   with the indicated width and extension.  */

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5452 /* ====================================================================== */
5453 /* The "INput 2" generators. These load the second operand to an insn. */
/* Each in2 helper loads the second operand into o->in2.  "_o" variants
   alias a TCG global (setting g_in2); others create a temporary with
   the indicated width/extension.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 64-bit value from the r1 even/odd pair (r1 high, r1+1 low).  */
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Register 0 means "no operand"; leave o->in2 unset in that case.  */
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* High 32 bits of r3.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* High 32 bits of r2.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Short (32-bit) float from r2.  */
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 128-bit float operand 2: both halves of the (r2, r2 + 2) FP pair,
       filling in1 and in2.  */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Address taken from the r2 register itself (no base/index).  */
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Second-operand effective address: x2 + b2 + d2.  */
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* PC-relative address: current pc + 2 * signed i2.  */
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Shift amount masked to 0..31.  */
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Shift amount masked to 0..63.  */
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* in2_m2_* load the second operand from the a2 address; the address in
   o->in2 is overwritten by the loaded value.  */

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* in2_mri2_* load via the PC-relative ri2 address.  */

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
/* in2_i2* materialize the immediate field as the second operand, with
   the indicated truncation/extension (and optional left shift taken
   from the insn table's data value).  */

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* The raw (left-aligned) instruction image itself as operand 2.  */
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
5737 /* ====================================================================== */
5739 /* Find opc within the table of insns. This is formulated as a switch
5740 statement so that (1) we get compile-time notice of cut-paste errors
5741 for duplicated opcodes, and (2) the compiler generates the binary
5742 search tree, rather than us having to post-process the table. */
5744 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5745 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5747 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5749 enum DisasInsnEnum {
5750 #include "insn-data.def"
5753 #undef D
5754 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5755 .opc = OPC, \
5756 .fmt = FMT_##FT, \
5757 .fac = FAC_##FC, \
5758 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5759 .name = #NM, \
5760 .help_in1 = in1_##I1, \
5761 .help_in2 = in2_##I2, \
5762 .help_prep = prep_##P, \
5763 .help_wout = wout_##W, \
5764 .help_cout = cout_##CC, \
5765 .help_op = op_##OP, \
5766 .data = D \
5769 /* Allow 0 to be used for NULL in the table below. */
5770 #define in1_0 NULL
5771 #define in2_0 NULL
5772 #define prep_0 NULL
5773 #define wout_0 NULL
5774 #define cout_0 NULL
5775 #define op_0 NULL
5777 #define SPEC_in1_0 0
5778 #define SPEC_in2_0 0
5779 #define SPEC_prep_0 0
5780 #define SPEC_wout_0 0
5782 /* Give smaller names to the various facilities. */
5783 #define FAC_Z S390_FEAT_ZARCH
5784 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5785 #define FAC_DFP S390_FEAT_DFP
5786 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5787 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5788 #define FAC_EE S390_FEAT_EXECUTE_EXT
5789 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5790 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5791 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5792 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5793 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5794 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5795 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5796 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5797 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5798 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5799 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5800 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5801 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5802 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5803 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5804 #define FAC_SFLE S390_FEAT_STFLE
5805 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5806 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5807 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5808 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5809 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5810 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5811 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5812 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5813 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5814 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5815 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5816 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5817 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5818 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
5819 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
5820 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
5822 static const DisasInsn insn_info[] = {
5823 #include "insn-data.def"
5826 #undef D
5827 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5828 case OPC: return &insn_info[insn_ ## NM];
/* Map a combined 16-bit opcode (primary << 8 | secondary) to its table
   entry via a compiler-generated switch over insn-data.def, or NULL for
   an unknown opcode.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
5839 #undef D
5840 #undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
/* Extract one operand field F from the left-aligned INSN image into the
   compressed operand array O->c, applying sign extension or the dl/dh
   un-swap as the field type requires.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized entry marks an unused slot in the format table.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;
    o->c[f->indexC] = r;
}
5882 /* Lookup the insn at the current PC, extracting the operands into O and
5883 returning the info struct for the insn. Returns NULL for invalid insn. */
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for an invalid
   insn.  If s->ex_value is set, the instruction image comes from a
   preceding EXECUTE instead of guest memory.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE: the modified instruction
           image in the high bits, the length in the low nibble.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the 2/4/6-byte image in the 64-bit word so that
           field extraction can use big-bit-endian offsets.  */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Decode and translate a single instruction at s->pc: extract it, check
   specification exceptions, run the table-driven in1/in2/prep/op/wout/
   cout helper pipeline, release temporaries, and advance the PC.
   Returns the exit status requested by the op helper.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* PER (program-event recording) instruction-fetch tracing.  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Register-pair operands must name an even register.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit float operands must name a valid FP register pair.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    o.out = NULL;
    o.out2 = NULL;
    o.in1 = NULL;
    o.in2 = NULL;
    o.addr1 = NULL;

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers; g_* flags mark the
       slots that alias TCG globals and must not be freed.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
/*
 * Translate a block of guest s390x code starting at tb->pc into one
 * TCG TranslationBlock.  Instructions are translated one at a time by
 * translate_one() until something forces the block to end: a guest
 * page boundary, a full TCG op buffer, the per-TB instruction budget,
 * single-stepping, or an in-flight EXECUTE target (dc.ex_value).
 */
6116 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
6118     CPUS390XState *env = cs->env_ptr;
6119     DisasContext dc;
6120     target_ulong pc_start;
6121     uint64_t next_page_start;
6122     int num_insns, max_insns;
6123     ExitStatus status;
6124     bool do_debug;
6126     pc_start = tb->pc;
6128     /* 31-bit mode */
         /* Without FLAG_MASK_64 the CPU is in 31-bit addressing mode, so
            only the low 31 bits of the start address are significant.  */
6129     if (!(tb->flags & FLAG_MASK_64)) {
6130         pc_start &= 0x7fffffff;
6133     dc.tb = tb;
6134     dc.pc = pc_start;
6135     dc.cc_op = CC_OP_DYNAMIC;
         /* cs_base carries the target of an in-progress EXECUTE insn
            (see the "IN: EXECUTE" log case below); zero otherwise.  */
6136     dc.ex_value = tb->cs_base;
6137     do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
6139     next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
6141     num_insns = 0;
         /* Cap the number of guest insns per TB; 0 means "no explicit
            request", which we treat as the maximum allowed.  */
6142     max_insns = tb_cflags(tb) & CF_COUNT_MASK;
6143     if (max_insns == 0) {
6144         max_insns = CF_COUNT_MASK;
6146     if (max_insns > TCG_MAX_INSNS) {
6147         max_insns = TCG_MAX_INSNS;
6150     gen_tb_start(tb);
6152     do {
             /* Record pc and cc_op for this insn so that
                restore_state_to_opc() can recover them on fault.  */
6153         tcg_gen_insn_start(dc.pc, dc.cc_op);
6154         num_insns++;
6156         if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
6157             status = EXIT_PC_STALE;
6158             do_debug = true;
6159             /* The address covered by the breakpoint must be included in
6160                [tb->pc, tb->pc + tb->size) in order to for it to be
6161                properly cleared -- thus we increment the PC here so that
6162                the logic setting tb->size below does the right thing. */
6163             dc.pc += 2;
6164             break;
             /* For icount, the final (possibly I/O) insn must be bracketed
                by gen_io_start()/gen_io_end().  */
6167         if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
6168             gen_io_start();
6171         status = translate_one(env, &dc);
6173         /* If we reach a page boundary, are single stepping,
6174            or exhaust instruction count, stop generation. */
6175         if (status == NO_EXIT
6176             && (dc.pc >= next_page_start
6177                 || tcg_op_buf_full()
6178                 || num_insns >= max_insns
6179                 || singlestep
6180                 || cs->singlestep_enabled
6181                 || dc.ex_value)) {
6182             status = EXIT_PC_STALE;
6184     } while (status == NO_EXIT);
6186     if (tb_cflags(tb) & CF_LAST_IO) {
6187         gen_io_end();
         /* Emit the TB epilogue appropriate to how translation ended.  */
6190     switch (status) {
6191     case EXIT_GOTO_TB:
6192     case EXIT_NORETURN:
             /* Exit already fully generated by the last insn.  */
6193         break;
6194     case EXIT_PC_STALE:
6195     case EXIT_PC_STALE_NOCHAIN:
             /* psw_addr does not yet reflect dc.pc; write it back.  */
6196         update_psw_addr(&dc);
6197         /* FALLTHRU */
6198     case EXIT_PC_UPDATED:
6199         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6200            cc op type is in env */
6201         update_cc_op(&dc);
6202         /* FALLTHRU */
6203     case EXIT_PC_CC_UPDATED:
6204         /* Exit the TB, either by raising a debug exception or by return. */
6205         if (do_debug) {
6206             gen_exception(EXCP_DEBUG);
6207         } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
                 /* Chaining not allowed: return to the main loop.  */
6208             tcg_gen_exit_tb(0);
6209         } else {
                 /* Try to jump directly to the next TB.  */
6210             tcg_gen_lookup_and_goto_ptr();
6212         break;
6213     default:
6214         g_assert_not_reached();
6217     gen_tb_end(tb, num_insns);
6219     tb->size = dc.pc - pc_start;
6220     tb->icount = num_insns;
6222 #if defined(S390X_DEBUG_DISAS)
6223     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
6224         && qemu_log_in_addr_range(pc_start)) {
6225         qemu_log_lock();
6226         if (unlikely(dc.ex_value)) {
6227             /* ??? Unfortunately log_target_disas can't use host memory. */
6228             qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
6229         } else {
6230             qemu_log("IN: %s\n", lookup_symbol(pc_start));
6231             log_target_disas(cs, pc_start, dc.pc - pc_start);
6232             qemu_log("\n");
6234         qemu_log_unlock();
6236 #endif
6239 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6240 target_ulong *data)
6242 int cc_op = data[1];
6243 env->psw.addr = data[0];
6244 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6245 env->cc_op = cc_op;