valgrind/i386: avoid false positives on KVM_SET_MSRS ioctl
[qemu.git] / target-s390x / translate.c
blobdbf1993d4653f44bdb772ddc73b163bda732a43a
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t pc, next_pc;
58 enum cc_op cc_op;
59 bool singlestep_enabled;
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;
65 bool is_64;
66 bool g1;
67 bool g2;
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
74 #define DISAS_EXCP 4
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
88 return pc;
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
124 #ifndef CONFIG_USER_ONLY
125 for (i = 0; i < 16; i++) {
126 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
127 if ((i % 4) == 3) {
128 cpu_fprintf(f, "\n");
129 } else {
130 cpu_fprintf(f, " ");
133 #endif
135 #ifdef DEBUG_INLINE_BRANCHES
136 for (i = 0; i < CC_OP_MAX; i++) {
137 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
138 inline_branch_miss[i], inline_branch_hit[i]);
140 #endif
142 cpu_fprintf(f, "\n");
145 static TCGv_i64 psw_addr;
146 static TCGv_i64 psw_mask;
148 static TCGv_i32 cc_op;
149 static TCGv_i64 cc_src;
150 static TCGv_i64 cc_dst;
151 static TCGv_i64 cc_vr;
153 static char cpu_reg_names[32][4];
154 static TCGv_i64 regs[16];
155 static TCGv_i64 fregs[16];
157 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
159 void s390x_translate_init(void)
161 int i;
163 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
164 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
165 offsetof(CPUS390XState, psw.addr),
166 "psw_addr");
167 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
168 offsetof(CPUS390XState, psw.mask),
169 "psw_mask");
171 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
172 "cc_op");
173 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
174 "cc_src");
175 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
176 "cc_dst");
177 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
178 "cc_vr");
180 for (i = 0; i < 16; i++) {
181 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
182 regs[i] = tcg_global_mem_new(TCG_AREG0,
183 offsetof(CPUS390XState, regs[i]),
184 cpu_reg_names[i]);
187 for (i = 0; i < 16; i++) {
188 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
189 fregs[i] = tcg_global_mem_new(TCG_AREG0,
190 offsetof(CPUS390XState, fregs[i].d),
191 cpu_reg_names[i + 16]);
195 static TCGv_i64 load_reg(int reg)
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
202 static TCGv_i64 load_freg32_i64(int reg)
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
209 static void store_reg(int reg, TCGv_i64 v)
211 tcg_gen_mov_i64(regs[reg], v);
214 static void store_freg(int reg, TCGv_i64 v)
216 tcg_gen_mov_i64(fregs[reg], v);
219 static void store_reg32_i64(int reg, TCGv_i64 v)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
225 static void store_reg32h_i64(int reg, TCGv_i64 v)
227 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
230 static void store_freg32_i64(int reg, TCGv_i64 v)
232 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
235 static void return_low128(TCGv_i64 dest)
237 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
240 static void update_psw_addr(DisasContext *s)
242 /* psw.addr */
243 tcg_gen_movi_i64(psw_addr, s->pc);
246 static void update_cc_op(DisasContext *s)
248 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
249 tcg_gen_movi_i32(cc_op, s->cc_op);
253 static void potential_page_fault(DisasContext *s)
255 update_psw_addr(s);
256 update_cc_op(s);
259 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
261 return (uint64_t)cpu_lduw_code(env, pc);
264 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
269 static int get_mem_index(DisasContext *s)
271 switch (s->tb->flags & FLAG_MASK_ASC) {
272 case PSW_ASC_PRIMARY >> 32:
273 return 0;
274 case PSW_ASC_SECONDARY >> 32:
275 return 1;
276 case PSW_ASC_HOME >> 32:
277 return 2;
278 default:
279 tcg_abort();
280 break;
284 static void gen_exception(int excp)
286 TCGv_i32 tmp = tcg_const_i32(excp);
287 gen_helper_exception(cpu_env, tmp);
288 tcg_temp_free_i32(tmp);
291 static void gen_program_exception(DisasContext *s, int code)
293 TCGv_i32 tmp;
295 /* Remember what pgm exeption this was. */
296 tmp = tcg_const_i32(code);
297 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
298 tcg_temp_free_i32(tmp);
300 tmp = tcg_const_i32(s->next_pc - s->pc);
301 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
302 tcg_temp_free_i32(tmp);
304 /* Advance past instruction. */
305 s->pc = s->next_pc;
306 update_psw_addr(s);
308 /* Save off cc. */
309 update_cc_op(s);
311 /* Trigger exception. */
312 gen_exception(EXCP_PGM);
315 static inline void gen_illegal_opcode(DisasContext *s)
317 gen_program_exception(s, PGM_SPECIFICATION);
320 static inline void check_privileged(DisasContext *s)
322 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
323 gen_program_exception(s, PGM_PRIVILEGED);
327 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
329 TCGv_i64 tmp = tcg_temp_new_i64();
330 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
332 /* Note that d2 is limited to 20 bits, signed. If we crop negative
333 displacements early we create larger immedate addends. */
335 /* Note that addi optimizes the imm==0 case. */
336 if (b2 && x2) {
337 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
338 tcg_gen_addi_i64(tmp, tmp, d2);
339 } else if (b2) {
340 tcg_gen_addi_i64(tmp, regs[b2], d2);
341 } else if (x2) {
342 tcg_gen_addi_i64(tmp, regs[x2], d2);
343 } else {
344 if (need_31) {
345 d2 &= 0x7fffffff;
346 need_31 = false;
348 tcg_gen_movi_i64(tmp, d2);
350 if (need_31) {
351 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
354 return tmp;
357 static inline bool live_cc_data(DisasContext *s)
359 return (s->cc_op != CC_OP_DYNAMIC
360 && s->cc_op != CC_OP_STATIC
361 && s->cc_op > 3);
364 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
366 if (live_cc_data(s)) {
367 tcg_gen_discard_i64(cc_src);
368 tcg_gen_discard_i64(cc_dst);
369 tcg_gen_discard_i64(cc_vr);
371 s->cc_op = CC_OP_CONST0 + val;
374 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
376 if (live_cc_data(s)) {
377 tcg_gen_discard_i64(cc_src);
378 tcg_gen_discard_i64(cc_vr);
380 tcg_gen_mov_i64(cc_dst, dst);
381 s->cc_op = op;
384 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
385 TCGv_i64 dst)
387 if (live_cc_data(s)) {
388 tcg_gen_discard_i64(cc_vr);
390 tcg_gen_mov_i64(cc_src, src);
391 tcg_gen_mov_i64(cc_dst, dst);
392 s->cc_op = op;
395 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
396 TCGv_i64 dst, TCGv_i64 vr)
398 tcg_gen_mov_i64(cc_src, src);
399 tcg_gen_mov_i64(cc_dst, dst);
400 tcg_gen_mov_i64(cc_vr, vr);
401 s->cc_op = op;
404 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
406 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
409 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
411 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
414 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
416 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
419 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
421 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
424 /* CC value is in env->cc_op */
425 static void set_cc_static(DisasContext *s)
427 if (live_cc_data(s)) {
428 tcg_gen_discard_i64(cc_src);
429 tcg_gen_discard_i64(cc_dst);
430 tcg_gen_discard_i64(cc_vr);
432 s->cc_op = CC_OP_STATIC;
435 /* calculates cc into cc_op */
436 static void gen_op_calc_cc(DisasContext *s)
438 TCGv_i32 local_cc_op;
439 TCGv_i64 dummy;
441 TCGV_UNUSED_I32(local_cc_op);
442 TCGV_UNUSED_I64(dummy);
443 switch (s->cc_op) {
444 default:
445 dummy = tcg_const_i64(0);
446 /* FALLTHRU */
447 case CC_OP_ADD_64:
448 case CC_OP_ADDU_64:
449 case CC_OP_ADDC_64:
450 case CC_OP_SUB_64:
451 case CC_OP_SUBU_64:
452 case CC_OP_SUBB_64:
453 case CC_OP_ADD_32:
454 case CC_OP_ADDU_32:
455 case CC_OP_ADDC_32:
456 case CC_OP_SUB_32:
457 case CC_OP_SUBU_32:
458 case CC_OP_SUBB_32:
459 local_cc_op = tcg_const_i32(s->cc_op);
460 break;
461 case CC_OP_CONST0:
462 case CC_OP_CONST1:
463 case CC_OP_CONST2:
464 case CC_OP_CONST3:
465 case CC_OP_STATIC:
466 case CC_OP_DYNAMIC:
467 break;
470 switch (s->cc_op) {
471 case CC_OP_CONST0:
472 case CC_OP_CONST1:
473 case CC_OP_CONST2:
474 case CC_OP_CONST3:
475 /* s->cc_op is the cc value */
476 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
477 break;
478 case CC_OP_STATIC:
479 /* env->cc_op already is the cc value */
480 break;
481 case CC_OP_NZ:
482 case CC_OP_ABS_64:
483 case CC_OP_NABS_64:
484 case CC_OP_ABS_32:
485 case CC_OP_NABS_32:
486 case CC_OP_LTGT0_32:
487 case CC_OP_LTGT0_64:
488 case CC_OP_COMP_32:
489 case CC_OP_COMP_64:
490 case CC_OP_NZ_F32:
491 case CC_OP_NZ_F64:
492 case CC_OP_FLOGR:
493 /* 1 argument */
494 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
495 break;
496 case CC_OP_ICM:
497 case CC_OP_LTGT_32:
498 case CC_OP_LTGT_64:
499 case CC_OP_LTUGTU_32:
500 case CC_OP_LTUGTU_64:
501 case CC_OP_TM_32:
502 case CC_OP_TM_64:
503 case CC_OP_SLA_32:
504 case CC_OP_SLA_64:
505 case CC_OP_NZ_F128:
506 /* 2 arguments */
507 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
508 break;
509 case CC_OP_ADD_64:
510 case CC_OP_ADDU_64:
511 case CC_OP_ADDC_64:
512 case CC_OP_SUB_64:
513 case CC_OP_SUBU_64:
514 case CC_OP_SUBB_64:
515 case CC_OP_ADD_32:
516 case CC_OP_ADDU_32:
517 case CC_OP_ADDC_32:
518 case CC_OP_SUB_32:
519 case CC_OP_SUBU_32:
520 case CC_OP_SUBB_32:
521 /* 3 arguments */
522 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
523 break;
524 case CC_OP_DYNAMIC:
525 /* unknown operation - assume 3 arguments and cc_op in env */
526 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
527 break;
528 default:
529 tcg_abort();
532 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
533 tcg_temp_free_i32(local_cc_op);
535 if (!TCGV_IS_UNUSED_I64(dummy)) {
536 tcg_temp_free_i64(dummy);
539 /* We now have cc in cc_op as constant */
540 set_cc_static(s);
543 static int use_goto_tb(DisasContext *s, uint64_t dest)
545 /* NOTE: we handle the case where the TB spans two pages here */
546 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
547 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
548 && !s->singlestep_enabled
549 && !(s->tb->cflags & CF_LAST_IO));
552 static void account_noninline_branch(DisasContext *s, int cc_op)
554 #ifdef DEBUG_INLINE_BRANCHES
555 inline_branch_miss[cc_op]++;
556 #endif
559 static void account_inline_branch(DisasContext *s, int cc_op)
561 #ifdef DEBUG_INLINE_BRANCHES
562 inline_branch_hit[cc_op]++;
563 #endif
566 /* Table of mask values to comparison codes, given a comparison as input.
567 For such, CC=3 should not be possible. */
568 static const TCGCond ltgt_cond[16] = {
569 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
570 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
571 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
572 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
573 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
574 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
575 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
576 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
579 /* Table of mask values to comparison codes, given a logic op as input.
580 For such, only CC=0 and CC=1 should be possible. */
581 static const TCGCond nz_cond[16] = {
582 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
583 TCG_COND_NEVER, TCG_COND_NEVER,
584 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
585 TCG_COND_NE, TCG_COND_NE,
586 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
587 TCG_COND_EQ, TCG_COND_EQ,
588 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
589 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
592 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
593 details required to generate a TCG comparison. */
594 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
596 TCGCond cond;
597 enum cc_op old_cc_op = s->cc_op;
599 if (mask == 15 || mask == 0) {
600 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
601 c->u.s32.a = cc_op;
602 c->u.s32.b = cc_op;
603 c->g1 = c->g2 = true;
604 c->is_64 = false;
605 return;
608 /* Find the TCG condition for the mask + cc op. */
609 switch (old_cc_op) {
610 case CC_OP_LTGT0_32:
611 case CC_OP_LTGT0_64:
612 case CC_OP_LTGT_32:
613 case CC_OP_LTGT_64:
614 cond = ltgt_cond[mask];
615 if (cond == TCG_COND_NEVER) {
616 goto do_dynamic;
618 account_inline_branch(s, old_cc_op);
619 break;
621 case CC_OP_LTUGTU_32:
622 case CC_OP_LTUGTU_64:
623 cond = tcg_unsigned_cond(ltgt_cond[mask]);
624 if (cond == TCG_COND_NEVER) {
625 goto do_dynamic;
627 account_inline_branch(s, old_cc_op);
628 break;
630 case CC_OP_NZ:
631 cond = nz_cond[mask];
632 if (cond == TCG_COND_NEVER) {
633 goto do_dynamic;
635 account_inline_branch(s, old_cc_op);
636 break;
638 case CC_OP_TM_32:
639 case CC_OP_TM_64:
640 switch (mask) {
641 case 8:
642 cond = TCG_COND_EQ;
643 break;
644 case 4 | 2 | 1:
645 cond = TCG_COND_NE;
646 break;
647 default:
648 goto do_dynamic;
650 account_inline_branch(s, old_cc_op);
651 break;
653 case CC_OP_ICM:
654 switch (mask) {
655 case 8:
656 cond = TCG_COND_EQ;
657 break;
658 case 4 | 2 | 1:
659 case 4 | 2:
660 cond = TCG_COND_NE;
661 break;
662 default:
663 goto do_dynamic;
665 account_inline_branch(s, old_cc_op);
666 break;
668 case CC_OP_FLOGR:
669 switch (mask & 0xa) {
670 case 8: /* src == 0 -> no one bit found */
671 cond = TCG_COND_EQ;
672 break;
673 case 2: /* src != 0 -> one bit found */
674 cond = TCG_COND_NE;
675 break;
676 default:
677 goto do_dynamic;
679 account_inline_branch(s, old_cc_op);
680 break;
682 case CC_OP_ADDU_32:
683 case CC_OP_ADDU_64:
684 switch (mask) {
685 case 8 | 2: /* vr == 0 */
686 cond = TCG_COND_EQ;
687 break;
688 case 4 | 1: /* vr != 0 */
689 cond = TCG_COND_NE;
690 break;
691 case 8 | 4: /* no carry -> vr >= src */
692 cond = TCG_COND_GEU;
693 break;
694 case 2 | 1: /* carry -> vr < src */
695 cond = TCG_COND_LTU;
696 break;
697 default:
698 goto do_dynamic;
700 account_inline_branch(s, old_cc_op);
701 break;
703 case CC_OP_SUBU_32:
704 case CC_OP_SUBU_64:
705 /* Note that CC=0 is impossible; treat it as dont-care. */
706 switch (mask & 7) {
707 case 2: /* zero -> op1 == op2 */
708 cond = TCG_COND_EQ;
709 break;
710 case 4 | 1: /* !zero -> op1 != op2 */
711 cond = TCG_COND_NE;
712 break;
713 case 4: /* borrow (!carry) -> op1 < op2 */
714 cond = TCG_COND_LTU;
715 break;
716 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
717 cond = TCG_COND_GEU;
718 break;
719 default:
720 goto do_dynamic;
722 account_inline_branch(s, old_cc_op);
723 break;
725 default:
726 do_dynamic:
727 /* Calculate cc value. */
728 gen_op_calc_cc(s);
729 /* FALLTHRU */
731 case CC_OP_STATIC:
732 /* Jump based on CC. We'll load up the real cond below;
733 the assignment here merely avoids a compiler warning. */
734 account_noninline_branch(s, old_cc_op);
735 old_cc_op = CC_OP_STATIC;
736 cond = TCG_COND_NEVER;
737 break;
740 /* Load up the arguments of the comparison. */
741 c->is_64 = true;
742 c->g1 = c->g2 = false;
743 switch (old_cc_op) {
744 case CC_OP_LTGT0_32:
745 c->is_64 = false;
746 c->u.s32.a = tcg_temp_new_i32();
747 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
748 c->u.s32.b = tcg_const_i32(0);
749 break;
750 case CC_OP_LTGT_32:
751 case CC_OP_LTUGTU_32:
752 case CC_OP_SUBU_32:
753 c->is_64 = false;
754 c->u.s32.a = tcg_temp_new_i32();
755 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
756 c->u.s32.b = tcg_temp_new_i32();
757 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
758 break;
760 case CC_OP_LTGT0_64:
761 case CC_OP_NZ:
762 case CC_OP_FLOGR:
763 c->u.s64.a = cc_dst;
764 c->u.s64.b = tcg_const_i64(0);
765 c->g1 = true;
766 break;
767 case CC_OP_LTGT_64:
768 case CC_OP_LTUGTU_64:
769 case CC_OP_SUBU_64:
770 c->u.s64.a = cc_src;
771 c->u.s64.b = cc_dst;
772 c->g1 = c->g2 = true;
773 break;
775 case CC_OP_TM_32:
776 case CC_OP_TM_64:
777 case CC_OP_ICM:
778 c->u.s64.a = tcg_temp_new_i64();
779 c->u.s64.b = tcg_const_i64(0);
780 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
781 break;
783 case CC_OP_ADDU_32:
784 c->is_64 = false;
785 c->u.s32.a = tcg_temp_new_i32();
786 c->u.s32.b = tcg_temp_new_i32();
787 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
788 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
789 tcg_gen_movi_i32(c->u.s32.b, 0);
790 } else {
791 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
793 break;
795 case CC_OP_ADDU_64:
796 c->u.s64.a = cc_vr;
797 c->g1 = true;
798 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
799 c->u.s64.b = tcg_const_i64(0);
800 } else {
801 c->u.s64.b = cc_src;
802 c->g2 = true;
804 break;
806 case CC_OP_STATIC:
807 c->is_64 = false;
808 c->u.s32.a = cc_op;
809 c->g1 = true;
810 switch (mask) {
811 case 0x8 | 0x4 | 0x2: /* cc != 3 */
812 cond = TCG_COND_NE;
813 c->u.s32.b = tcg_const_i32(3);
814 break;
815 case 0x8 | 0x4 | 0x1: /* cc != 2 */
816 cond = TCG_COND_NE;
817 c->u.s32.b = tcg_const_i32(2);
818 break;
819 case 0x8 | 0x2 | 0x1: /* cc != 1 */
820 cond = TCG_COND_NE;
821 c->u.s32.b = tcg_const_i32(1);
822 break;
823 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
824 cond = TCG_COND_EQ;
825 c->g1 = false;
826 c->u.s32.a = tcg_temp_new_i32();
827 c->u.s32.b = tcg_const_i32(0);
828 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
829 break;
830 case 0x8 | 0x4: /* cc < 2 */
831 cond = TCG_COND_LTU;
832 c->u.s32.b = tcg_const_i32(2);
833 break;
834 case 0x8: /* cc == 0 */
835 cond = TCG_COND_EQ;
836 c->u.s32.b = tcg_const_i32(0);
837 break;
838 case 0x4 | 0x2 | 0x1: /* cc != 0 */
839 cond = TCG_COND_NE;
840 c->u.s32.b = tcg_const_i32(0);
841 break;
842 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
843 cond = TCG_COND_NE;
844 c->g1 = false;
845 c->u.s32.a = tcg_temp_new_i32();
846 c->u.s32.b = tcg_const_i32(0);
847 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
848 break;
849 case 0x4: /* cc == 1 */
850 cond = TCG_COND_EQ;
851 c->u.s32.b = tcg_const_i32(1);
852 break;
853 case 0x2 | 0x1: /* cc > 1 */
854 cond = TCG_COND_GTU;
855 c->u.s32.b = tcg_const_i32(1);
856 break;
857 case 0x2: /* cc == 2 */
858 cond = TCG_COND_EQ;
859 c->u.s32.b = tcg_const_i32(2);
860 break;
861 case 0x1: /* cc == 3 */
862 cond = TCG_COND_EQ;
863 c->u.s32.b = tcg_const_i32(3);
864 break;
865 default:
866 /* CC is masked by something else: (8 >> cc) & mask. */
867 cond = TCG_COND_NE;
868 c->g1 = false;
869 c->u.s32.a = tcg_const_i32(8);
870 c->u.s32.b = tcg_const_i32(0);
871 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
872 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
873 break;
875 break;
877 default:
878 abort();
880 c->cond = cond;
883 static void free_compare(DisasCompare *c)
885 if (!c->g1) {
886 if (c->is_64) {
887 tcg_temp_free_i64(c->u.s64.a);
888 } else {
889 tcg_temp_free_i32(c->u.s32.a);
892 if (!c->g2) {
893 if (c->is_64) {
894 tcg_temp_free_i64(c->u.s64.b);
895 } else {
896 tcg_temp_free_i32(c->u.s32.b);
901 /* ====================================================================== */
902 /* Define the insn format enumeration. */
903 #define F0(N) FMT_##N,
904 #define F1(N, X1) F0(N)
905 #define F2(N, X1, X2) F0(N)
906 #define F3(N, X1, X2, X3) F0(N)
907 #define F4(N, X1, X2, X3, X4) F0(N)
908 #define F5(N, X1, X2, X3, X4, X5) F0(N)
910 typedef enum {
911 #include "insn-format.def"
912 } DisasFormat;
914 #undef F0
915 #undef F1
916 #undef F2
917 #undef F3
918 #undef F4
919 #undef F5
921 /* Define a structure to hold the decoded fields. We'll store each inside
922 an array indexed by an enum. In order to conserve memory, we'll arrange
923 for fields that do not exist at the same time to overlap, thus the "C"
924 for compact. For checking purposes there is an "O" for original index
925 as well that will be applied to availability bitmaps. */
/* Original ("O") operand field indexes, used as bit positions in the
   presentO availability bitmap of DisasFields.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact ("C") field indexes: fields that never coexist in one insn
   format share a slot in the DisasFields c[] array.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
981 struct DisasFields {
982 unsigned op:8;
983 unsigned op2:8;
984 unsigned presentC:16;
985 unsigned int presentO;
986 int c[NUM_C_FIELD];
989 /* This is the way fields are to be accessed out of DisasFields. */
990 #define have_field(S, F) have_field1((S), FLD_O_##F)
991 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
993 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
995 return (f->presentO >> c) & 1;
998 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
999 enum DisasFieldIndexC c)
1001 assert(have_field1(f, o));
1002 return f->c[c];
1005 /* Describe the layout of each field in each format. */
1006 typedef struct DisasField {
1007 unsigned int beg:8;
1008 unsigned int size:8;
1009 unsigned int type:2;
1010 unsigned int indexC:6;
1011 enum DisasFieldIndexO indexO:8;
1012 } DisasField;
1014 typedef struct DisasFormatInfo {
1015 DisasField op[NUM_C_FIELD];
1016 } DisasFormatInfo;
1018 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1019 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1020 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1021 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1022 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1023 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1024 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1025 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1027 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1028 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1029 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1030 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1031 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1033 #define F0(N) { { } },
1034 #define F1(N, X1) { { X1 } },
1035 #define F2(N, X1, X2) { { X1, X2 } },
1036 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1037 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1038 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1040 static const DisasFormatInfo format_info[] = {
1041 #include "insn-format.def"
1044 #undef F0
1045 #undef F1
1046 #undef F2
1047 #undef F3
1048 #undef F4
1049 #undef F5
1050 #undef R
1051 #undef M
1052 #undef BD
1053 #undef BXD
1054 #undef BDL
1055 #undef BXDL
1056 #undef I
1057 #undef L
1059 /* Generally, we'll extract operands into this structures, operate upon
1060 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1061 of routines below for more details. */
1062 typedef struct {
1063 bool g_out, g_out2, g_in1, g_in2;
1064 TCGv_i64 out, out2, in1, in2;
1065 TCGv_i64 addr1;
1066 } DisasOps;
1068 /* Instructions can place constraints on their operands, raising specification
1069 exceptions if they are violated. To make this easy to automate, each "in1",
1070 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1071 of the following, or 0. To make this easy to document, we'll put the
1072 SPEC_<name> defines next to <name>. */
1074 #define SPEC_r1_even 1
1075 #define SPEC_r2_even 2
1076 #define SPEC_r3_even 4
1077 #define SPEC_r1_f128 8
1078 #define SPEC_r2_f128 16
1080 /* Return values from translate_one, indicating the state of the TB. */
1081 typedef enum {
1082 /* Continue the TB. */
1083 NO_EXIT,
1084 /* We have emitted one or more goto_tb. No fixup required. */
1085 EXIT_GOTO_TB,
1086 /* We are not using a goto_tb (for whatever reason), but have updated
1087 the PC (for whatever reason), so there's no need to do it again on
1088 exiting the TB. */
1089 EXIT_PC_UPDATED,
1090 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1091 updated the PC for the next instruction to be executed. */
1092 EXIT_PC_STALE,
1093 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1094 No following code will be executed. */
1095 EXIT_NORETURN,
1096 } ExitStatus;
/* Architecture facilities an instruction may be gated on.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception sumilation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1121 struct DisasInsn {
1122 unsigned opc:16;
1123 DisasFormat fmt:8;
1124 DisasFacility fac:8;
1125 unsigned spec:8;
1127 const char *name;
1129 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1130 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1131 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1132 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1133 void (*help_cout)(DisasContext *, DisasOps *);
1134 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1136 uint64_t data;
1139 /* ====================================================================== */
1140 /* Miscellaneous helpers, used by several operations. */
1142 static void help_l2_shift(DisasContext *s, DisasFields *f,
1143 DisasOps *o, int mask)
1145 int b2 = get_field(f, b2);
1146 int d2 = get_field(f, d2);
1148 if (b2 == 0) {
1149 o->in2 = tcg_const_i64(d2 & mask);
1150 } else {
1151 o->in2 = get_address(s, 0, b2, d2);
1152 tcg_gen_andi_i64(o->in2, o->in2, mask);
1156 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1158 if (dest == s->next_pc) {
1159 return NO_EXIT;
1161 if (use_goto_tb(s, dest)) {
1162 update_cc_op(s);
1163 tcg_gen_goto_tb(0);
1164 tcg_gen_movi_i64(psw_addr, dest);
1165 tcg_gen_exit_tb((uintptr_t)s->tb);
1166 return EXIT_GOTO_TB;
1167 } else {
1168 tcg_gen_movi_i64(psw_addr, dest);
1169 return EXIT_PC_UPDATED;
1173 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1174 bool is_imm, int imm, TCGv_i64 cdest)
1176 ExitStatus ret;
1177 uint64_t dest = s->pc + 2 * imm;
1178 int lab;
1180 /* Take care of the special cases first. */
1181 if (c->cond == TCG_COND_NEVER) {
1182 ret = NO_EXIT;
1183 goto egress;
1185 if (is_imm) {
1186 if (dest == s->next_pc) {
1187 /* Branch to next. */
1188 ret = NO_EXIT;
1189 goto egress;
1191 if (c->cond == TCG_COND_ALWAYS) {
1192 ret = help_goto_direct(s, dest);
1193 goto egress;
1195 } else {
1196 if (TCGV_IS_UNUSED_I64(cdest)) {
1197 /* E.g. bcr %r0 -> no branch. */
1198 ret = NO_EXIT;
1199 goto egress;
1201 if (c->cond == TCG_COND_ALWAYS) {
1202 tcg_gen_mov_i64(psw_addr, cdest);
1203 ret = EXIT_PC_UPDATED;
1204 goto egress;
1208 if (use_goto_tb(s, s->next_pc)) {
1209 if (is_imm && use_goto_tb(s, dest)) {
1210 /* Both exits can use goto_tb. */
1211 update_cc_op(s);
1213 lab = gen_new_label();
1214 if (c->is_64) {
1215 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1216 } else {
1217 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1220 /* Branch not taken. */
1221 tcg_gen_goto_tb(0);
1222 tcg_gen_movi_i64(psw_addr, s->next_pc);
1223 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1225 /* Branch taken. */
1226 gen_set_label(lab);
1227 tcg_gen_goto_tb(1);
1228 tcg_gen_movi_i64(psw_addr, dest);
1229 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1231 ret = EXIT_GOTO_TB;
1232 } else {
1233 /* Fallthru can use goto_tb, but taken branch cannot. */
1234 /* Store taken branch destination before the brcond. This
1235 avoids having to allocate a new local temp to hold it.
1236 We'll overwrite this in the not taken case anyway. */
1237 if (!is_imm) {
1238 tcg_gen_mov_i64(psw_addr, cdest);
1241 lab = gen_new_label();
1242 if (c->is_64) {
1243 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1244 } else {
1245 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1248 /* Branch not taken. */
1249 update_cc_op(s);
1250 tcg_gen_goto_tb(0);
1251 tcg_gen_movi_i64(psw_addr, s->next_pc);
1252 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1254 gen_set_label(lab);
1255 if (is_imm) {
1256 tcg_gen_movi_i64(psw_addr, dest);
1258 ret = EXIT_PC_UPDATED;
1260 } else {
1261 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1262 Most commonly we're single-stepping or some other condition that
1263 disables all use of goto_tb. Just update the PC and exit. */
1265 TCGv_i64 next = tcg_const_i64(s->next_pc);
1266 if (is_imm) {
1267 cdest = tcg_const_i64(dest);
1270 if (c->is_64) {
1271 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1272 cdest, next);
1273 } else {
1274 TCGv_i32 t0 = tcg_temp_new_i32();
1275 TCGv_i64 t1 = tcg_temp_new_i64();
1276 TCGv_i64 z = tcg_const_i64(0);
1277 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1278 tcg_gen_extu_i32_i64(t1, t0);
1279 tcg_temp_free_i32(t0);
1280 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1281 tcg_temp_free_i64(t1);
1282 tcg_temp_free_i64(z);
1285 if (is_imm) {
1286 tcg_temp_free_i64(cdest);
1288 tcg_temp_free_i64(next);
1290 ret = EXIT_PC_UPDATED;
1293 egress:
1294 free_compare(c);
1295 return ret;
1298 /* ====================================================================== */
1299 /* The operations. These perform the bulk of the work for any insn,
1300 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE: out = |in2| via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Short BFP absolute value: clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Long BFP absolute value: clear the sign bit of the 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Extended BFP absolute value: clear the sign bit in the high doubleword
   and copy the low doubleword unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* ADD: out = in1 + in2.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1333 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1335 DisasCompare cmp;
1336 TCGv_i64 carry;
1338 tcg_gen_add_i64(o->out, o->in1, o->in2);
1340 /* The carry flag is the msb of CC, therefore the branch mask that would
1341 create that comparison is 3. Feeding the generated comparison to
1342 setcond produces the carry flag that we desire. */
1343 disas_jcc(s, &cmp, 3);
1344 carry = tcg_temp_new_i64();
1345 if (cmp.is_64) {
1346 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1347 } else {
1348 TCGv_i32 t = tcg_temp_new_i32();
1349 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1350 tcg_gen_extu_i32_i64(carry, t);
1351 tcg_temp_free_i32(t);
1353 free_compare(&cmp);
1355 tcg_gen_add_i64(o->out, o->out, carry);
1356 tcg_temp_free_i64(carry);
1357 return NO_EXIT;
1360 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1362 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1363 return NO_EXIT;
1366 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1368 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1369 return NO_EXIT;
1372 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1374 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1375 return_low128(o->out2);
1376 return NO_EXIT;
1379 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1381 tcg_gen_and_i64(o->out, o->in1, o->in2);
1382 return NO_EXIT;
1385 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1387 int shift = s->insn->data & 0xff;
1388 int size = s->insn->data >> 8;
1389 uint64_t mask = ((1ull << size) - 1) << shift;
1391 assert(!o->g_in2);
1392 tcg_gen_shli_i64(o->in2, o->in2, shift);
1393 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1394 tcg_gen_and_i64(o->out, o->in1, o->in2);
1396 /* Produce the CC from only the bits manipulated. */
1397 tcg_gen_andi_i64(cc_dst, o->out, mask);
1398 set_cc_nz_u64(s, cc_dst);
1399 return NO_EXIT;
1402 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1404 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1405 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1406 tcg_gen_mov_i64(psw_addr, o->in2);
1407 return EXIT_PC_UPDATED;
1408 } else {
1409 return NO_EXIT;
1413 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1415 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1416 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1419 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1421 int m1 = get_field(s->fields, m1);
1422 bool is_imm = have_field(s->fields, i2);
1423 int imm = is_imm ? get_field(s->fields, i2) : 0;
1424 DisasCompare c;
1426 disas_jcc(s, &c, m1);
1427 return help_branch(s, &c, is_imm, imm, o->in2);
1430 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1432 int r1 = get_field(s->fields, r1);
1433 bool is_imm = have_field(s->fields, i2);
1434 int imm = is_imm ? get_field(s->fields, i2) : 0;
1435 DisasCompare c;
1436 TCGv_i64 t;
1438 c.cond = TCG_COND_NE;
1439 c.is_64 = false;
1440 c.g1 = false;
1441 c.g2 = false;
1443 t = tcg_temp_new_i64();
1444 tcg_gen_subi_i64(t, regs[r1], 1);
1445 store_reg32_i64(r1, t);
1446 c.u.s32.a = tcg_temp_new_i32();
1447 c.u.s32.b = tcg_const_i32(0);
1448 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1449 tcg_temp_free_i64(t);
1451 return help_branch(s, &c, is_imm, imm, o->in2);
1454 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1456 int r1 = get_field(s->fields, r1);
1457 bool is_imm = have_field(s->fields, i2);
1458 int imm = is_imm ? get_field(s->fields, i2) : 0;
1459 DisasCompare c;
1461 c.cond = TCG_COND_NE;
1462 c.is_64 = true;
1463 c.g1 = true;
1464 c.g2 = false;
1466 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1467 c.u.s64.a = regs[r1];
1468 c.u.s64.b = tcg_const_i64(0);
1470 return help_branch(s, &c, is_imm, imm, o->in2);
1473 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1475 int r1 = get_field(s->fields, r1);
1476 int r3 = get_field(s->fields, r3);
1477 bool is_imm = have_field(s->fields, i2);
1478 int imm = is_imm ? get_field(s->fields, i2) : 0;
1479 DisasCompare c;
1480 TCGv_i64 t;
1482 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1483 c.is_64 = false;
1484 c.g1 = false;
1485 c.g2 = false;
1487 t = tcg_temp_new_i64();
1488 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1489 c.u.s32.a = tcg_temp_new_i32();
1490 c.u.s32.b = tcg_temp_new_i32();
1491 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1492 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1493 store_reg32_i64(r1, t);
1494 tcg_temp_free_i64(t);
1496 return help_branch(s, &c, is_imm, imm, o->in2);
1499 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1501 int r1 = get_field(s->fields, r1);
1502 int r3 = get_field(s->fields, r3);
1503 bool is_imm = have_field(s->fields, i2);
1504 int imm = is_imm ? get_field(s->fields, i2) : 0;
1505 DisasCompare c;
1507 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1508 c.is_64 = true;
1510 if (r1 == (r3 | 1)) {
1511 c.u.s64.b = load_reg(r3 | 1);
1512 c.g2 = false;
1513 } else {
1514 c.u.s64.b = regs[r3 | 1];
1515 c.g2 = true;
1518 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1519 c.u.s64.a = regs[r1];
1520 c.g1 = true;
1522 return help_branch(s, &c, is_imm, imm, o->in2);
1525 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1527 int imm, m3 = get_field(s->fields, m3);
1528 bool is_imm;
1529 DisasCompare c;
1531 c.cond = ltgt_cond[m3];
1532 if (s->insn->data) {
1533 c.cond = tcg_unsigned_cond(c.cond);
1535 c.is_64 = c.g1 = c.g2 = true;
1536 c.u.s64.a = o->in1;
1537 c.u.s64.b = o->in2;
1539 is_imm = have_field(s->fields, i4);
1540 if (is_imm) {
1541 imm = get_field(s->fields, i4);
1542 } else {
1543 imm = 0;
1544 o->out = get_address(s, 0, get_field(s->fields, b4),
1545 get_field(s->fields, d4));
1548 return help_branch(s, &c, is_imm, imm, o->out);
1551 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1553 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1554 set_cc_static(s);
1555 return NO_EXIT;
1558 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1560 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1561 set_cc_static(s);
1562 return NO_EXIT;
1565 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1567 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1568 set_cc_static(s);
1569 return NO_EXIT;
1572 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1574 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1575 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1576 tcg_temp_free_i32(m3);
1577 gen_set_cc_nz_f32(s, o->in2);
1578 return NO_EXIT;
1581 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1583 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1584 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1585 tcg_temp_free_i32(m3);
1586 gen_set_cc_nz_f64(s, o->in2);
1587 return NO_EXIT;
1590 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1592 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1593 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1594 tcg_temp_free_i32(m3);
1595 gen_set_cc_nz_f128(s, o->in1, o->in2);
1596 return NO_EXIT;
1599 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1601 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1602 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1603 tcg_temp_free_i32(m3);
1604 gen_set_cc_nz_f32(s, o->in2);
1605 return NO_EXIT;
1608 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1610 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1611 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1612 tcg_temp_free_i32(m3);
1613 gen_set_cc_nz_f64(s, o->in2);
1614 return NO_EXIT;
1617 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1619 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1620 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1621 tcg_temp_free_i32(m3);
1622 gen_set_cc_nz_f128(s, o->in1, o->in2);
1623 return NO_EXIT;
1626 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1628 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1629 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1630 tcg_temp_free_i32(m3);
1631 gen_set_cc_nz_f32(s, o->in2);
1632 return NO_EXIT;
1635 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1637 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1638 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1639 tcg_temp_free_i32(m3);
1640 gen_set_cc_nz_f64(s, o->in2);
1641 return NO_EXIT;
1644 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1646 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1647 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1648 tcg_temp_free_i32(m3);
1649 gen_set_cc_nz_f128(s, o->in1, o->in2);
1650 return NO_EXIT;
1653 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1655 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1656 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1657 tcg_temp_free_i32(m3);
1658 gen_set_cc_nz_f32(s, o->in2);
1659 return NO_EXIT;
1662 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1664 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1665 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1666 tcg_temp_free_i32(m3);
1667 gen_set_cc_nz_f64(s, o->in2);
1668 return NO_EXIT;
1671 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1673 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1674 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1675 tcg_temp_free_i32(m3);
1676 gen_set_cc_nz_f128(s, o->in1, o->in2);
1677 return NO_EXIT;
1680 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1682 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1683 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1684 tcg_temp_free_i32(m3);
1685 return NO_EXIT;
1688 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1690 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1691 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1692 tcg_temp_free_i32(m3);
1693 return NO_EXIT;
1696 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1698 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1699 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1700 tcg_temp_free_i32(m3);
1701 return_low128(o->out2);
1702 return NO_EXIT;
1705 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1707 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1708 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1709 tcg_temp_free_i32(m3);
1710 return NO_EXIT;
1713 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1715 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1716 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1717 tcg_temp_free_i32(m3);
1718 return NO_EXIT;
1721 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1723 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1724 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1725 tcg_temp_free_i32(m3);
1726 return_low128(o->out2);
1727 return NO_EXIT;
1730 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1732 int r2 = get_field(s->fields, r2);
1733 TCGv_i64 len = tcg_temp_new_i64();
1735 potential_page_fault(s);
1736 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1737 set_cc_static(s);
1738 return_low128(o->out);
1740 tcg_gen_add_i64(regs[r2], regs[r2], len);
1741 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1742 tcg_temp_free_i64(len);
1744 return NO_EXIT;
1747 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1749 int l = get_field(s->fields, l1);
1750 TCGv_i32 vl;
1752 switch (l + 1) {
1753 case 1:
1754 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1755 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1756 break;
1757 case 2:
1758 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1759 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1760 break;
1761 case 4:
1762 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1763 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1764 break;
1765 case 8:
1766 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1767 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1768 break;
1769 default:
1770 potential_page_fault(s);
1771 vl = tcg_const_i32(l);
1772 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1773 tcg_temp_free_i32(vl);
1774 set_cc_static(s);
1775 return NO_EXIT;
1777 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1778 return NO_EXIT;
1781 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1783 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1784 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1785 potential_page_fault(s);
1786 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1787 tcg_temp_free_i32(r1);
1788 tcg_temp_free_i32(r3);
1789 set_cc_static(s);
1790 return NO_EXIT;
1793 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1795 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1796 TCGv_i32 t1 = tcg_temp_new_i32();
1797 tcg_gen_trunc_i64_i32(t1, o->in1);
1798 potential_page_fault(s);
1799 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1800 set_cc_static(s);
1801 tcg_temp_free_i32(t1);
1802 tcg_temp_free_i32(m3);
1803 return NO_EXIT;
1806 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1808 potential_page_fault(s);
1809 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1810 set_cc_static(s);
1811 return_low128(o->in2);
1812 return NO_EXIT;
1815 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1817 TCGv_i64 t = tcg_temp_new_i64();
1818 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1819 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1820 tcg_gen_or_i64(o->out, o->out, t);
1821 tcg_temp_free_i64(t);
1822 return NO_EXIT;
1825 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1827 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1828 int d2 = get_field(s->fields, d2);
1829 int b2 = get_field(s->fields, b2);
1830 int is_64 = s->insn->data;
1831 TCGv_i64 addr, mem, cc, z;
1833 /* Note that in1 = R3 (new value) and
1834 in2 = (zero-extended) R1 (expected value). */
1836 /* Load the memory into the (temporary) output. While the PoO only talks
1837 about moving the memory to R1 on inequality, if we include equality it
1838 means that R1 is equal to the memory in all conditions. */
1839 addr = get_address(s, 0, b2, d2);
1840 if (is_64) {
1841 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1842 } else {
1843 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1846 /* Are the memory and expected values (un)equal? Note that this setcond
1847 produces the output CC value, thus the NE sense of the test. */
1848 cc = tcg_temp_new_i64();
1849 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1851 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1852 Recall that we are allowed to unconditionally issue the store (and
1853 thus any possible write trap), so (re-)store the original contents
1854 of MEM in case of inequality. */
1855 z = tcg_const_i64(0);
1856 mem = tcg_temp_new_i64();
1857 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1858 if (is_64) {
1859 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1860 } else {
1861 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1863 tcg_temp_free_i64(z);
1864 tcg_temp_free_i64(mem);
1865 tcg_temp_free_i64(addr);
1867 /* Store CC back to cc_op. Wait until after the store so that any
1868 exception gets the old cc_op value. */
1869 tcg_gen_trunc_i64_i32(cc_op, cc);
1870 tcg_temp_free_i64(cc);
1871 set_cc_static(s);
1872 return NO_EXIT;
1875 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1877 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1878 int r1 = get_field(s->fields, r1);
1879 int r3 = get_field(s->fields, r3);
1880 int d2 = get_field(s->fields, d2);
1881 int b2 = get_field(s->fields, b2);
1882 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1884 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1886 addrh = get_address(s, 0, b2, d2);
1887 addrl = get_address(s, 0, b2, d2 + 8);
1888 outh = tcg_temp_new_i64();
1889 outl = tcg_temp_new_i64();
1891 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
1892 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
1894 /* Fold the double-word compare with arithmetic. */
1895 cc = tcg_temp_new_i64();
1896 z = tcg_temp_new_i64();
1897 tcg_gen_xor_i64(cc, outh, regs[r1]);
1898 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
1899 tcg_gen_or_i64(cc, cc, z);
1900 tcg_gen_movi_i64(z, 0);
1901 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
1903 memh = tcg_temp_new_i64();
1904 meml = tcg_temp_new_i64();
1905 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
1906 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
1907 tcg_temp_free_i64(z);
1909 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
1910 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
1911 tcg_temp_free_i64(memh);
1912 tcg_temp_free_i64(meml);
1913 tcg_temp_free_i64(addrh);
1914 tcg_temp_free_i64(addrl);
1916 /* Save back state now that we've passed all exceptions. */
1917 tcg_gen_mov_i64(regs[r1], outh);
1918 tcg_gen_mov_i64(regs[r1 + 1], outl);
1919 tcg_gen_trunc_i64_i32(cc_op, cc);
1920 tcg_temp_free_i64(outh);
1921 tcg_temp_free_i64(outl);
1922 tcg_temp_free_i64(cc);
1923 set_cc_static(s);
1924 return NO_EXIT;
1927 #ifndef CONFIG_USER_ONLY
1928 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1930 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1931 check_privileged(s);
1932 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1933 tcg_temp_free_i32(r1);
1934 set_cc_static(s);
1935 return NO_EXIT;
1937 #endif
1939 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1941 TCGv_i64 t1 = tcg_temp_new_i64();
1942 TCGv_i32 t2 = tcg_temp_new_i32();
1943 tcg_gen_trunc_i64_i32(t2, o->in1);
1944 gen_helper_cvd(t1, t2);
1945 tcg_temp_free_i32(t2);
1946 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1947 tcg_temp_free_i64(t1);
1948 return NO_EXIT;
1951 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
1953 int m3 = get_field(s->fields, m3);
1954 int lab = gen_new_label();
1955 TCGv_i32 t;
1956 TCGCond c;
1958 c = tcg_invert_cond(ltgt_cond[m3]);
1959 if (s->insn->data) {
1960 c = tcg_unsigned_cond(c);
1962 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
1964 /* Set DXC to 0xff. */
1965 t = tcg_temp_new_i32();
1966 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1967 tcg_gen_ori_i32(t, t, 0xff00);
1968 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1969 tcg_temp_free_i32(t);
1971 /* Trap. */
1972 gen_program_exception(s, PGM_DATA);
1974 gen_set_label(lab);
1975 return NO_EXIT;
1978 #ifndef CONFIG_USER_ONLY
1979 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1981 TCGv_i32 tmp;
1983 check_privileged(s);
1984 potential_page_fault(s);
1986 /* We pretend the format is RX_a so that D2 is the field we want. */
1987 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1988 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1989 tcg_temp_free_i32(tmp);
1990 return NO_EXIT;
1992 #endif
1994 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1996 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1997 return_low128(o->out);
1998 return NO_EXIT;
2001 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2003 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2004 return_low128(o->out);
2005 return NO_EXIT;
2008 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2010 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2011 return_low128(o->out);
2012 return NO_EXIT;
2015 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2017 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2018 return_low128(o->out);
2019 return NO_EXIT;
2022 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2024 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2025 return NO_EXIT;
2028 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2030 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2031 return NO_EXIT;
2034 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2036 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2037 return_low128(o->out2);
2038 return NO_EXIT;
2041 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2043 int r2 = get_field(s->fields, r2);
2044 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2045 return NO_EXIT;
2048 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2050 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2051 return NO_EXIT;
2054 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2056 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2057 tb->flags, (ab)use the tb->cs_base field as the address of
2058 the template in memory, and grab 8 bits of tb->flags/cflags for
2059 the contents of the register. We would then recognize all this
2060 in gen_intermediate_code_internal, generating code for exactly
2061 one instruction. This new TB then gets executed normally.
2063 On the other hand, this seems to be mostly used for modifying
2064 MVC inside of memcpy, which needs a helper call anyway. So
2065 perhaps this doesn't bear thinking about any further. */
2067 TCGv_i64 tmp;
2069 update_psw_addr(s);
2070 update_cc_op(s);
2072 tmp = tcg_const_i64(s->next_pc);
2073 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2074 tcg_temp_free_i64(tmp);
2076 set_cc_static(s);
2077 return NO_EXIT;
2080 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2082 /* We'll use the original input for cc computation, since we get to
2083 compare that against 0, which ought to be better than comparing
2084 the real output against 64. It also lets cc_dst be a convenient
2085 temporary during our computation. */
2086 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2088 /* R1 = IN ? CLZ(IN) : 64. */
2089 gen_helper_clz(o->out, o->in2);
2091 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2092 value by 64, which is undefined. But since the shift is 64 iff the
2093 input is zero, we still get the correct result after and'ing. */
2094 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2095 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2096 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2097 return NO_EXIT;
2100 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2102 int m3 = get_field(s->fields, m3);
2103 int pos, len, base = s->insn->data;
2104 TCGv_i64 tmp = tcg_temp_new_i64();
2105 uint64_t ccm;
2107 switch (m3) {
2108 case 0xf:
2109 /* Effectively a 32-bit load. */
2110 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2111 len = 32;
2112 goto one_insert;
2114 case 0xc:
2115 case 0x6:
2116 case 0x3:
2117 /* Effectively a 16-bit load. */
2118 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2119 len = 16;
2120 goto one_insert;
2122 case 0x8:
2123 case 0x4:
2124 case 0x2:
2125 case 0x1:
2126 /* Effectively an 8-bit load. */
2127 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2128 len = 8;
2129 goto one_insert;
2131 one_insert:
2132 pos = base + ctz32(m3) * 8;
2133 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2134 ccm = ((1ull << len) - 1) << pos;
2135 break;
2137 default:
2138 /* This is going to be a sequence of loads and inserts. */
2139 pos = base + 32 - 8;
2140 ccm = 0;
2141 while (m3) {
2142 if (m3 & 0x8) {
2143 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2144 tcg_gen_addi_i64(o->in2, o->in2, 1);
2145 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2146 ccm |= 0xff << pos;
2148 m3 = (m3 << 1) & 0xf;
2149 pos -= 8;
2151 break;
2154 tcg_gen_movi_i64(tmp, ccm);
2155 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2156 tcg_temp_free_i64(tmp);
2157 return NO_EXIT;
2160 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2162 int shift = s->insn->data & 0xff;
2163 int size = s->insn->data >> 8;
2164 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2165 return NO_EXIT;
2168 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2170 TCGv_i64 t1;
2172 gen_op_calc_cc(s);
2173 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2175 t1 = tcg_temp_new_i64();
2176 tcg_gen_shli_i64(t1, psw_mask, 20);
2177 tcg_gen_shri_i64(t1, t1, 36);
2178 tcg_gen_or_i64(o->out, o->out, t1);
2180 tcg_gen_extu_i32_i64(t1, cc_op);
2181 tcg_gen_shli_i64(t1, t1, 28);
2182 tcg_gen_or_i64(o->out, o->out, t1);
2183 tcg_temp_free_i64(t1);
2184 return NO_EXIT;
2187 #ifndef CONFIG_USER_ONLY
2188 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2190 check_privileged(s);
2191 gen_helper_ipte(cpu_env, o->in1, o->in2);
2192 return NO_EXIT;
2195 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2197 check_privileged(s);
2198 gen_helper_iske(o->out, cpu_env, o->in2);
2199 return NO_EXIT;
2201 #endif
2203 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2205 gen_helper_ldeb(o->out, cpu_env, o->in2);
2206 return NO_EXIT;
2209 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2211 gen_helper_ledb(o->out, cpu_env, o->in2);
2212 return NO_EXIT;
2215 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2217 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2218 return NO_EXIT;
2221 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2223 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2224 return NO_EXIT;
2227 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2229 gen_helper_lxdb(o->out, cpu_env, o->in2);
2230 return_low128(o->out2);
2231 return NO_EXIT;
2234 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2236 gen_helper_lxeb(o->out, cpu_env, o->in2);
2237 return_low128(o->out2);
2238 return NO_EXIT;
2241 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2243 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2244 return NO_EXIT;
2247 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2249 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2250 return NO_EXIT;
2253 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2255 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2256 return NO_EXIT;
2259 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2261 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2262 return NO_EXIT;
2265 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2267 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2268 return NO_EXIT;
2271 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2273 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2274 return NO_EXIT;
2277 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2279 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2280 return NO_EXIT;
2283 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2285 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2286 return NO_EXIT;
2289 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2291 DisasCompare c;
2293 disas_jcc(s, &c, get_field(s->fields, m3));
2295 if (c.is_64) {
2296 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2297 o->in2, o->in1);
2298 free_compare(&c);
2299 } else {
2300 TCGv_i32 t32 = tcg_temp_new_i32();
2301 TCGv_i64 t, z;
2303 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2304 free_compare(&c);
2306 t = tcg_temp_new_i64();
2307 tcg_gen_extu_i32_i64(t, t32);
2308 tcg_temp_free_i32(t32);
2310 z = tcg_const_i64(0);
2311 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2312 tcg_temp_free_i64(t);
2313 tcg_temp_free_i64(z);
2316 return NO_EXIT;
2319 #ifndef CONFIG_USER_ONLY
2320 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2322 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2323 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2324 check_privileged(s);
2325 potential_page_fault(s);
2326 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2327 tcg_temp_free_i32(r1);
2328 tcg_temp_free_i32(r3);
2329 return NO_EXIT;
2332 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2334 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2335 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2336 check_privileged(s);
2337 potential_page_fault(s);
2338 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2339 tcg_temp_free_i32(r1);
2340 tcg_temp_free_i32(r3);
2341 return NO_EXIT;
2343 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2345 check_privileged(s);
2346 potential_page_fault(s);
2347 gen_helper_lra(o->out, cpu_env, o->in2);
2348 set_cc_static(s);
2349 return NO_EXIT;
2352 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2354 TCGv_i64 t1, t2;
2356 check_privileged(s);
2358 t1 = tcg_temp_new_i64();
2359 t2 = tcg_temp_new_i64();
2360 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2361 tcg_gen_addi_i64(o->in2, o->in2, 4);
2362 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2363 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2364 tcg_gen_shli_i64(t1, t1, 32);
2365 gen_helper_load_psw(cpu_env, t1, t2);
2366 tcg_temp_free_i64(t1);
2367 tcg_temp_free_i64(t2);
2368 return EXIT_NORETURN;
2371 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2373 TCGv_i64 t1, t2;
2375 check_privileged(s);
2377 t1 = tcg_temp_new_i64();
2378 t2 = tcg_temp_new_i64();
2379 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2380 tcg_gen_addi_i64(o->in2, o->in2, 8);
2381 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2382 gen_helper_load_psw(cpu_env, t1, t2);
2383 tcg_temp_free_i64(t1);
2384 tcg_temp_free_i64(t2);
2385 return EXIT_NORETURN;
2387 #endif
2389 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2391 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2392 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2393 potential_page_fault(s);
2394 gen_helper_lam(cpu_env, r1, o->in2, r3);
2395 tcg_temp_free_i32(r1);
2396 tcg_temp_free_i32(r3);
2397 return NO_EXIT;
2400 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2402 int r1 = get_field(s->fields, r1);
2403 int r3 = get_field(s->fields, r3);
2404 TCGv_i64 t = tcg_temp_new_i64();
2405 TCGv_i64 t4 = tcg_const_i64(4);
2407 while (1) {
2408 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2409 store_reg32_i64(r1, t);
2410 if (r1 == r3) {
2411 break;
2413 tcg_gen_add_i64(o->in2, o->in2, t4);
2414 r1 = (r1 + 1) & 15;
2417 tcg_temp_free_i64(t);
2418 tcg_temp_free_i64(t4);
2419 return NO_EXIT;
2422 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2424 int r1 = get_field(s->fields, r1);
2425 int r3 = get_field(s->fields, r3);
2426 TCGv_i64 t = tcg_temp_new_i64();
2427 TCGv_i64 t4 = tcg_const_i64(4);
2429 while (1) {
2430 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2431 store_reg32h_i64(r1, t);
2432 if (r1 == r3) {
2433 break;
2435 tcg_gen_add_i64(o->in2, o->in2, t4);
2436 r1 = (r1 + 1) & 15;
2439 tcg_temp_free_i64(t);
2440 tcg_temp_free_i64(t4);
2441 return NO_EXIT;
/* LMG: load multiple full 64-bit registers r1..r3 (wrapping at 15) from
   consecutive doublewords at the address in o->in2. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        /* Load directly into the global register TCG value.  */
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
/* Register/value move: transfer ownership of in2 to out so the generic
   output hook stores it.  in2 is cleared so it is not double-freed. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    /* Propagate the "is a global, don't free" flag along with the value. */
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* 128-bit move: transfer ownership of the in1/in2 pair to out/out2.
   Both inputs are cleared to avoid double-free by the cleanup code. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MVC: storage-to-storage move of l1+1 bytes, done in the helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MVCL: move long, using register pairs r1 and r2 for address/length.
   The helper updates the registers and produces the CC. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    /* CC was computed by the helper; mark it as the static cc_op.  */
    set_cc_static(s);
    return NO_EXIT;
}

/* MVCLE: move long extended; pad byte comes in via o->in2.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2517 #ifndef CONFIG_USER_ONLY
/* MVCP: move to primary address space; the true-length register
   (note: taken from the l1 field) is passed to the helper.  Privileged. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MVCS: move to secondary address space; mirror image of MVCP.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2537 #endif
/* MVPG: move page; r0 supplies the flags operand per the architecture.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MVST: move string; r0 holds the terminating character.  The helper
   returns the updated first address in o->in1 and the updated second
   address via the low-128 return slot (picked up by return_low128). */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* Integer multiply, low 64 bits only.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Unsigned 64x64 -> 128 multiply; high part in out, low part in out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MEEB: 32-bit BFP multiply via helper (handles rounding/exceptions).  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MDEB: 32-bit BFP operands, 64-bit BFP product.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MDB: 64-bit BFP multiply.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MXB: 128-bit BFP multiply; the 128-bit result comes back split across
   the helper return and the low-128 slot. */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MXDB: 64-bit BFP operands, 128-bit BFP product.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* MAEB: 32-bit BFP multiply-and-add; the r3 addend is loaded as a
   32-bit float widened into an i64 container. */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MADB: 64-bit BFP multiply-and-add; addend taken directly from f[r3].  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MSEB: 32-bit BFP multiply-and-subtract.  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MSDB: 64-bit BFP multiply-and-subtract.  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* Integer negative-absolute (-|x|) via helper.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float32 negative-absolute: force the sign bit (bit 31 of the
   left-justified value) on. */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* Float64 negative-absolute: force the sign bit on.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* Float128 negative-absolute: sign bit lives in the high doubleword;
   the low doubleword is copied unchanged. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* NC: storage-to-storage AND of l1+1 bytes; CC set by the helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* Integer two's-complement negation.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float32 negation: flip the sign bit of the left-justified value.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* Float64 negation: flip the sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* Float128 negation: flip the sign bit in the high doubleword, copy
   the low doubleword unchanged. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* OC: storage-to-storage OR of l1+1 bytes; CC set by the helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* Bitwise OR of the two inputs.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* OR-immediate into a sub-field of the register (OIHH/OIHL/... family).
   insn->data encodes the field: low byte = shift, high byte = width. */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* POPCNT via helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2728 #ifndef CONFIG_USER_ONLY
/* PTLB: purge TLB.  Privileged; work done entirely in the helper.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2735 #endif
/* RISBG/RISBHG/RISBLG: rotate r2 left by i5, then insert the selected
   bit range [i3, i4] into r1, optionally zeroing the rest (i4 bit 7).
   The three variants differ in which half of r1 is affected (pmask). */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts.  */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            /* For the high-word variant, re-express the range in 64-bit
               bit positions so deposit parameters are correct.  */
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        /* pos < 0 below signals "no deposit; use the mask path".  */
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
/* RNSBG/ROSBG/RXSBG: rotate r2 left by i5, then AND/OR/XOR the selected
   bit range [i3, i4] into r1.  Bit 7 of i3 selects test-only mode
   (result discarded, CC still set). */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside the mask are forced to the operation's
       identity element so they pass r1 through unchanged.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Byte-reverse the low 16 bits.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse the low 32 bits.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse all 64 bits.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
/* RLL: rotate left the low 32 bits of in1 by in2, zero-extending the
   32-bit result into the 64-bit output. */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    /* Narrow to 32 bits, rotate, then widen back.  */
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* RLLG: 64-bit rotate left.  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2912 #ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended; CC from the helper.  Privileged.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SACF: set address space control fast.  Privileged.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return EXIT_PC_STALE;
}

/* SAM24/SAM31/SAM64: set addressing mode.  insn->data carries the
   2-bit mode value to install. */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam = tcg_const_i64(sam);

    /* Overwrite PSW_MASK_64 and PSW_MASK_32 */
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    tcg_temp_free_i64(tsam);
    /* The addressing mode affects address generation; end the TB.  */
    return EXIT_PC_STALE;
}
2940 #endif
/* SAR: set access register r1 from the low 32 bits of in2.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
/* SEB: 32-bit BFP subtract via helper.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SDB: 64-bit BFP subtract.  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SXB: 128-bit BFP subtract; low half returned via the low-128 slot.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQEB: 32-bit BFP square root.  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQDB: 64-bit BFP square root.  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQXB: 128-bit BFP square root.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2987 #ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP); CC from the helper.  Privileged.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGP: signal processor.  Privileged.  NOTE(review): unlike the other
   CC-producing helpers here, this does not call set_cc_static — verify
   whether the helper's cc_op result is consumed elsewhere. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3006 #endif
/* STOC/STOCG: store on condition.  Branch over the store when the m3
   condition is true (the branch tests the INVERSE sense produced by
   disas_jcc).  insn->data selects 64-bit (nonzero) vs 32-bit store. */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    int lab, r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    /* The address is computed here rather than via the generic in2 hook
       so that it is only evaluated on the fall-through (store) path.  */
    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SLA/SLAG: shift left arithmetic.  insn->data is the sign-bit position
   (31 for the 32-bit form, 63 for the 64-bit form). */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* CC is computed from the pre-shift value and the shift amount.  */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* Logical shift left.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Arithmetic shift right.  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Logical shift right.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SFPC: set the floating-point control register from in2.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SFASR: set FPC and signal (may raise simulated IEEE exceptions).  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
/* SRNM/SRNMB/SRNMT: set the rounding-mode field of the FPC.  The three
   opcodes select different field positions/widths within the FPC. */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        /* No base register: the mode is the displacement itself.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3121 #ifndef CONFIG_USER_ONLY
/* SPKA: set PSW key from bits 4-7 of the second-operand address.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SSKE: set storage key extended, via helper.  Privileged.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SSM: set system mask — replace PSW bits 0-7 with the operand byte.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STAP: store CPU address.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested.  */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* STCK: store clock; the helper produces the TOD value.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* STCKE: store clock extended — write a 16-byte value built from the
   64-bit TOD clock. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    /* Store the two doublewords at the operand address.  */
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SCKC: set clock comparator.  Privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STCKC: store clock comparator.  Privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STCTG: store control registers r1..r3 (64-bit), via helper.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STCTL: store control registers r1..r3 (32-bit), via helper.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STIDP: store CPU id — here simply the cpu_num field.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SPT: set CPU timer.  Privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
/* STFL: store facility list — writes a hard-coded facility word to the
   fixed low-core location 200. */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented.  */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}

/* STPT: store CPU timer.  Privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STSI: store system information; function code and selectors come
   from r0/r1 per the architecture.  CC from the helper. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SPX: set prefix register.  Privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* Shared stub for channel-subsystem instructions: always CC 3.  */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational.  */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
/* STPX: store prefix — the psa field masked to its architected bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}

/* STNSM (op 0xac) / STOSM: store the current system mask byte, then
   AND (STNSM) or OR (STOSM) the immediate into PSW bits 0-7. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into the top byte only.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the top byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}

/* STURA: store using real address, via helper.  Privileged.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3320 #endif
/* Store the low byte of in1 at the address in in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low halfword.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low word.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the full doubleword.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STAM: store access registers r1..r3, via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STCM/STCMY/STCMH: store the register bytes selected by mask m3.
   insn->data gives the bit position of the lowest byte of the source
   field (distinguishes the high-word variant).  Contiguous masks are
   emitted as a single wider store; sparse masks fall back to a
   byte-by-byte sequence. */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the least-significant selected byte within in1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            /* Walk the mask from its most-significant bit downwards.  */
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STM/STMY/STMG: store multiple registers r1..r3 (wrapping at 15);
   insn->data is the element size (4 or 8). */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
/* STMH: store the HIGH halves of registers r1..r3 (wrapping at 15) as
   consecutive words. */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* Shift the high half down; the 32-bit store takes the low bits
           of the (now left-shifted) value.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
/* SRST: search string; r0 supplies the character sought.  The helper
   returns the updated first operand in o->in1 and the second via the
   low-128 slot. */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* Integer subtract.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Subtract with borrow: out = in1 - in2 - borrow, where the borrow is
   derived from the current condition code. */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the 0/1 result there, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SVC: supervisor call — record the SVC code and instruction length in
   the CPU state, then raise the SVC exception. */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Make the PSW state (address, cc) current before taking the trap.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* The interrupt handler needs the length of the SVC insn itself.  */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
/* TCEB: test data class, 32-bit BFP; CC from the helper.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TCDB: test data class, 64-bit BFP.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TCXB: test data class, 128-bit BFP (value in out/out2).  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3535 #ifndef CONFIG_USER_ONLY
/* TPROT: test protection; CC from the helper.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3543 #endif
/* TR: translate l1+1 bytes at addr1 through the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPK: unpack packed-decimal data, via helper.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* XC: storage-to-storage XOR of l1+1 bytes.  When both operands name
   the same location the result is architecturally all zeros, so short
   lengths are open-coded as zero stores; otherwise defer to the helper. */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is a length-minus-one field; emit stores largest-first.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XOR with itself always yields zero -> CC 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* Bitwise XOR of the two inputs.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR-immediate into a sub-field of the register; insn->data encodes
   the field as in op_ori (low byte = shift, high byte = width). */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Produce a constant zero output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a constant zero 128-bit output; out2 aliases out, so it is
   marked global-like to prevent a double free. */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3654 /* ====================================================================== */
3655 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3656 the original inputs), update the various cc data structures in order to
3657 be able to compute the new condition code. */
/* CC output helper: absolute value, 32-bit.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* CC output helper: absolute value, 64-bit.  */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed add, 32-bit: CC needs both inputs and the result.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* Signed add, 64-bit.  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* Unsigned (logical) add, 32-bit.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

/* Unsigned (logical) add, 64-bit.  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Add with carry, 32-bit.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

/* Add with carry, 64-bit.  */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed compare, 32-bit.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* Signed compare, 64-bit.  */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* Unsigned compare, 32-bit.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* Unsigned compare, 64-bit.  */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Float result class, 32-bit.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

/* Float result class, 64-bit.  */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* Float result class, 128-bit (value split across out/out2).  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* Negative absolute value, 32-bit.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* Negative absolute value, 64-bit.  */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* Negation (complement), 32-bit.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

/* Negation (complement), 64-bit.  */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Zero/nonzero test of the low 32 bits; the result is first
   zero-extended into cc_dst so the generic NZ op can be used. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

/* Zero/nonzero test of the full 64 bits.  */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Signed compare against zero, 32-bit.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

/* Signed compare against zero, 64-bit.  */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Signed subtract, 32-bit: CC needs both inputs and the result.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}
3780 static void cout_subs64(DisasContext *s, DisasOps *o)
3782 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3785 static void cout_subu32(DisasContext *s, DisasOps *o)
3787 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3790 static void cout_subu64(DisasContext *s, DisasOps *o)
3792 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3795 static void cout_subb32(DisasContext *s, DisasOps *o)
3797 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3800 static void cout_subb64(DisasContext *s, DisasOps *o)
3802 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3805 static void cout_tm32(DisasContext *s, DisasOps *o)
3807 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3810 static void cout_tm64(DisasContext *s, DisasOps *o)
3812 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
3821 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3823 o->out = tcg_temp_new_i64();
3825 #define SPEC_prep_new 0
3827 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3829 o->out = tcg_temp_new_i64();
3830 o->out2 = tcg_temp_new_i64();
3832 #define SPEC_prep_new_P 0
3834 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3836 o->out = regs[get_field(f, r1)];
3837 o->g_out = true;
3839 #define SPEC_prep_r1 0
3841 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3843 int r1 = get_field(f, r1);
3844 o->out = regs[r1];
3845 o->out2 = regs[r1 + 1];
3846 o->g_out = o->g_out2 = true;
3848 #define SPEC_prep_r1_P SPEC_r1_even
3850 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3852 o->out = fregs[get_field(f, r1)];
3853 o->g_out = true;
3855 #define SPEC_prep_f1 0
3857 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3859 int r1 = get_field(f, r1);
3860 o->out = fregs[r1];
3861 o->out2 = fregs[r1 + 2];
3862 o->g_out = o->g_out2 = true;
3864 #define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
3872 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3874 store_reg(get_field(f, r1), o->out);
3876 #define SPEC_wout_r1 0
3878 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3880 int r1 = get_field(f, r1);
3881 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3883 #define SPEC_wout_r1_8 0
3885 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3887 int r1 = get_field(f, r1);
3888 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3890 #define SPEC_wout_r1_16 0
3892 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3894 store_reg32_i64(get_field(f, r1), o->out);
3896 #define SPEC_wout_r1_32 0
3898 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3900 int r1 = get_field(f, r1);
3901 store_reg32_i64(r1, o->out);
3902 store_reg32_i64(r1 + 1, o->out2);
3904 #define SPEC_wout_r1_P32 SPEC_r1_even
3906 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3908 int r1 = get_field(f, r1);
3909 store_reg32_i64(r1 + 1, o->out);
3910 tcg_gen_shri_i64(o->out, o->out, 32);
3911 store_reg32_i64(r1, o->out);
3913 #define SPEC_wout_r1_D32 SPEC_r1_even
3915 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3917 store_freg32_i64(get_field(f, r1), o->out);
3919 #define SPEC_wout_e1 0
3921 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3923 store_freg(get_field(f, r1), o->out);
3925 #define SPEC_wout_f1 0
3927 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3929 int f1 = get_field(s->fields, r1);
3930 store_freg(f1, o->out);
3931 store_freg(f1 + 2, o->out2);
3933 #define SPEC_wout_x1 SPEC_r1_f128
3935 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3937 if (get_field(f, r1) != get_field(f, r2)) {
3938 store_reg32_i64(get_field(f, r1), o->out);
3941 #define SPEC_wout_cond_r1r2_32 0
3943 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3945 if (get_field(f, r1) != get_field(f, r2)) {
3946 store_freg32_i64(get_field(f, r1), o->out);
3949 #define SPEC_wout_cond_e1e2 0
3951 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3953 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3955 #define SPEC_wout_m1_8 0
3957 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3959 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3961 #define SPEC_wout_m1_16 0
3963 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3965 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3967 #define SPEC_wout_m1_32 0
3969 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3971 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3973 #define SPEC_wout_m1_64 0
3975 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3977 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3979 #define SPEC_wout_m2_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
3984 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3986 o->in1 = load_reg(get_field(f, r1));
3988 #define SPEC_in1_r1 0
3990 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3992 o->in1 = regs[get_field(f, r1)];
3993 o->g_in1 = true;
3995 #define SPEC_in1_r1_o 0
3997 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3999 o->in1 = tcg_temp_new_i64();
4000 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4002 #define SPEC_in1_r1_32s 0
4004 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4006 o->in1 = tcg_temp_new_i64();
4007 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4009 #define SPEC_in1_r1_32u 0
4011 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4013 o->in1 = tcg_temp_new_i64();
4014 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4016 #define SPEC_in1_r1_sr32 0
4018 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4020 o->in1 = load_reg(get_field(f, r1) + 1);
4022 #define SPEC_in1_r1p1 SPEC_r1_even
4024 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4026 o->in1 = tcg_temp_new_i64();
4027 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4029 #define SPEC_in1_r1p1_32s SPEC_r1_even
4031 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4033 o->in1 = tcg_temp_new_i64();
4034 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4036 #define SPEC_in1_r1p1_32u SPEC_r1_even
4038 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4040 int r1 = get_field(f, r1);
4041 o->in1 = tcg_temp_new_i64();
4042 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4044 #define SPEC_in1_r1_D32 SPEC_r1_even
4046 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4048 o->in1 = load_reg(get_field(f, r2));
4050 #define SPEC_in1_r2 0
4052 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4054 o->in1 = load_reg(get_field(f, r3));
4056 #define SPEC_in1_r3 0
4058 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4060 o->in1 = regs[get_field(f, r3)];
4061 o->g_in1 = true;
4063 #define SPEC_in1_r3_o 0
4065 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4067 o->in1 = tcg_temp_new_i64();
4068 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4070 #define SPEC_in1_r3_32s 0
4072 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4074 o->in1 = tcg_temp_new_i64();
4075 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4077 #define SPEC_in1_r3_32u 0
4079 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4081 int r3 = get_field(f, r3);
4082 o->in1 = tcg_temp_new_i64();
4083 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4085 #define SPEC_in1_r3_D32 SPEC_r3_even
4087 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4089 o->in1 = load_freg32_i64(get_field(f, r1));
4091 #define SPEC_in1_e1 0
4093 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4095 o->in1 = fregs[get_field(f, r1)];
4096 o->g_in1 = true;
4098 #define SPEC_in1_f1_o 0
4100 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4102 int r1 = get_field(f, r1);
4103 o->out = fregs[r1];
4104 o->out2 = fregs[r1 + 2];
4105 o->g_out = o->g_out2 = true;
4107 #define SPEC_in1_x1_o SPEC_r1_f128
4109 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4111 o->in1 = fregs[get_field(f, r3)];
4112 o->g_in1 = true;
4114 #define SPEC_in1_f3_o 0
4116 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4118 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4120 #define SPEC_in1_la1 0
4122 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4124 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4125 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4127 #define SPEC_in1_la2 0
4129 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4131 in1_la1(s, f, o);
4132 o->in1 = tcg_temp_new_i64();
4133 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4135 #define SPEC_in1_m1_8u 0
4137 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4139 in1_la1(s, f, o);
4140 o->in1 = tcg_temp_new_i64();
4141 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4143 #define SPEC_in1_m1_16s 0
4145 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4147 in1_la1(s, f, o);
4148 o->in1 = tcg_temp_new_i64();
4149 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4151 #define SPEC_in1_m1_16u 0
4153 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4155 in1_la1(s, f, o);
4156 o->in1 = tcg_temp_new_i64();
4157 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4159 #define SPEC_in1_m1_32s 0
4161 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4163 in1_la1(s, f, o);
4164 o->in1 = tcg_temp_new_i64();
4165 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4167 #define SPEC_in1_m1_32u 0
4169 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4171 in1_la1(s, f, o);
4172 o->in1 = tcg_temp_new_i64();
4173 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4175 #define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
4180 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4182 o->in2 = regs[get_field(f, r1)];
4183 o->g_in2 = true;
4185 #define SPEC_in2_r1_o 0
4187 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4189 o->in2 = tcg_temp_new_i64();
4190 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4192 #define SPEC_in2_r1_16u 0
4194 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4196 o->in2 = tcg_temp_new_i64();
4197 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4199 #define SPEC_in2_r1_32u 0
4201 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4203 int r1 = get_field(f, r1);
4204 o->in2 = tcg_temp_new_i64();
4205 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4207 #define SPEC_in2_r1_D32 SPEC_r1_even
4209 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4211 o->in2 = load_reg(get_field(f, r2));
4213 #define SPEC_in2_r2 0
4215 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4217 o->in2 = regs[get_field(f, r2)];
4218 o->g_in2 = true;
4220 #define SPEC_in2_r2_o 0
4222 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4224 int r2 = get_field(f, r2);
4225 if (r2 != 0) {
4226 o->in2 = load_reg(r2);
4229 #define SPEC_in2_r2_nz 0
4231 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4233 o->in2 = tcg_temp_new_i64();
4234 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4236 #define SPEC_in2_r2_8s 0
4238 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4240 o->in2 = tcg_temp_new_i64();
4241 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4243 #define SPEC_in2_r2_8u 0
4245 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4247 o->in2 = tcg_temp_new_i64();
4248 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4250 #define SPEC_in2_r2_16s 0
4252 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4254 o->in2 = tcg_temp_new_i64();
4255 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4257 #define SPEC_in2_r2_16u 0
4259 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4261 o->in2 = load_reg(get_field(f, r3));
4263 #define SPEC_in2_r3 0
4265 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4267 o->in2 = tcg_temp_new_i64();
4268 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4270 #define SPEC_in2_r2_32s 0
4272 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4274 o->in2 = tcg_temp_new_i64();
4275 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4277 #define SPEC_in2_r2_32u 0
4279 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4281 o->in2 = load_freg32_i64(get_field(f, r2));
4283 #define SPEC_in2_e2 0
4285 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4287 o->in2 = fregs[get_field(f, r2)];
4288 o->g_in2 = true;
4290 #define SPEC_in2_f2_o 0
4292 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4294 int r2 = get_field(f, r2);
4295 o->in1 = fregs[r2];
4296 o->in2 = fregs[r2 + 2];
4297 o->g_in1 = o->g_in2 = true;
4299 #define SPEC_in2_x2_o SPEC_r2_f128
4301 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4303 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4305 #define SPEC_in2_ra2 0
4307 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4309 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4310 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4312 #define SPEC_in2_a2 0
4314 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4316 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4318 #define SPEC_in2_ri2 0
4320 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4322 help_l2_shift(s, f, o, 31);
4324 #define SPEC_in2_sh32 0
4326 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4328 help_l2_shift(s, f, o, 63);
4330 #define SPEC_in2_sh64 0
4332 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4334 in2_a2(s, f, o);
4335 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4337 #define SPEC_in2_m2_8u 0
4339 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4341 in2_a2(s, f, o);
4342 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4344 #define SPEC_in2_m2_16s 0
4346 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4348 in2_a2(s, f, o);
4349 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4351 #define SPEC_in2_m2_16u 0
4353 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4355 in2_a2(s, f, o);
4356 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4358 #define SPEC_in2_m2_32s 0
4360 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4362 in2_a2(s, f, o);
4363 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4365 #define SPEC_in2_m2_32u 0
4367 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4369 in2_a2(s, f, o);
4370 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4372 #define SPEC_in2_m2_64 0
4374 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4376 in2_ri2(s, f, o);
4377 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4379 #define SPEC_in2_mri2_16u 0
4381 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4383 in2_ri2(s, f, o);
4384 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4386 #define SPEC_in2_mri2_32s 0
4388 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4390 in2_ri2(s, f, o);
4391 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4393 #define SPEC_in2_mri2_32u 0
4395 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4397 in2_ri2(s, f, o);
4398 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4400 #define SPEC_in2_mri2_64 0
4402 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4404 o->in2 = tcg_const_i64(get_field(f, i2));
4406 #define SPEC_in2_i2 0
4408 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4410 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4412 #define SPEC_in2_i2_8u 0
4414 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4416 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4418 #define SPEC_in2_i2_16u 0
4420 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4422 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4424 #define SPEC_in2_i2_32u 0
4426 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4428 uint64_t i2 = (uint16_t)get_field(f, i2);
4429 o->in2 = tcg_const_i64(i2 << s->insn->data);
4431 #define SPEC_in2_i2_16u_shl 0
4433 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4435 uint64_t i2 = (uint32_t)get_field(f, i2);
4436 o->in2 = tcg_const_i64(i2 << s->insn->data);
4438 #define SPEC_in2_i2_32u_shl 0
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
4447 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4448 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4450 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4452 enum DisasInsnEnum {
4453 #include "insn-data.def"
4456 #undef D
4457 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4458 .opc = OPC, \
4459 .fmt = FMT_##FT, \
4460 .fac = FAC_##FC, \
4461 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4462 .name = #NM, \
4463 .help_in1 = in1_##I1, \
4464 .help_in2 = in2_##I2, \
4465 .help_prep = prep_##P, \
4466 .help_wout = wout_##W, \
4467 .help_cout = cout_##CC, \
4468 .help_op = op_##OP, \
4469 .data = D \
4472 /* Allow 0 to be used for NULL in the table below. */
4473 #define in1_0 NULL
4474 #define in2_0 NULL
4475 #define prep_0 NULL
4476 #define wout_0 NULL
4477 #define cout_0 NULL
4478 #define op_0 NULL
4480 #define SPEC_in1_0 0
4481 #define SPEC_in2_0 0
4482 #define SPEC_prep_0 0
4483 #define SPEC_wout_0 0
4485 static const DisasInsn insn_info[] = {
4486 #include "insn-data.def"
4489 #undef D
4490 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4491 case OPC: return &insn_info[insn_ ## NM];
4493 static const DisasInsn *lookup_opc(uint16_t opc)
4495 switch (opc) {
4496 #include "insn-data.def"
4497 default:
4498 return NULL;
4502 #undef D
4503 #undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
4509 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4511 uint32_t r, m;
4513 if (f->size == 0) {
4514 return;
4517 /* Zero extract the field from the insn. */
4518 r = (insn << f->beg) >> (64 - f->size);
4520 /* Sign-extend, or un-swap the field as necessary. */
4521 switch (f->type) {
4522 case 0: /* unsigned */
4523 break;
4524 case 1: /* signed */
4525 assert(f->size <= 32);
4526 m = 1u << (f->size - 1);
4527 r = (r ^ m) - m;
4528 break;
4529 case 2: /* dl+dh split, signed 20 bit. */
4530 r = ((int8_t)r << 12) | (r >> 8);
4531 break;
4532 default:
4533 abort();
4536 /* Validate that the "compressed" encoding we selected above is valid.
4537 I.e. we havn't make two different original fields overlap. */
4538 assert(((o->presentC >> f->indexC) & 1) == 0);
4539 o->presentC |= 1 << f->indexC;
4540 o->presentO |= 1 << f->indexO;
4542 o->c[f->indexC] = r;
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
4548 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4549 DisasFields *f)
4551 uint64_t insn, pc = s->pc;
4552 int op, op2, ilen;
4553 const DisasInsn *info;
4555 insn = ld_code2(env, pc);
4556 op = (insn >> 8) & 0xff;
4557 ilen = get_ilen(op);
4558 s->next_pc = s->pc + ilen;
4560 switch (ilen) {
4561 case 2:
4562 insn = insn << 48;
4563 break;
4564 case 4:
4565 insn = ld_code4(env, pc) << 32;
4566 break;
4567 case 6:
4568 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4569 break;
4570 default:
4571 abort();
4574 /* We can't actually determine the insn format until we've looked up
4575 the full insn opcode. Which we can't do without locating the
4576 secondary opcode. Assume by default that OP2 is at bit 40; for
4577 those smaller insns that don't actually have a secondary opcode
4578 this will correctly result in OP2 = 0. */
4579 switch (op) {
4580 case 0x01: /* E */
4581 case 0x80: /* S */
4582 case 0x82: /* S */
4583 case 0x93: /* S */
4584 case 0xb2: /* S, RRF, RRE */
4585 case 0xb3: /* RRE, RRD, RRF */
4586 case 0xb9: /* RRE, RRF */
4587 case 0xe5: /* SSE, SIL */
4588 op2 = (insn << 8) >> 56;
4589 break;
4590 case 0xa5: /* RI */
4591 case 0xa7: /* RI */
4592 case 0xc0: /* RIL */
4593 case 0xc2: /* RIL */
4594 case 0xc4: /* RIL */
4595 case 0xc6: /* RIL */
4596 case 0xc8: /* SSF */
4597 case 0xcc: /* RIL */
4598 op2 = (insn << 12) >> 60;
4599 break;
4600 case 0xd0 ... 0xdf: /* SS */
4601 case 0xe1: /* SS */
4602 case 0xe2: /* SS */
4603 case 0xe8: /* SS */
4604 case 0xe9: /* SS */
4605 case 0xea: /* SS */
4606 case 0xee ... 0xf3: /* SS */
4607 case 0xf8 ... 0xfd: /* SS */
4608 op2 = 0;
4609 break;
4610 default:
4611 op2 = (insn << 40) >> 56;
4612 break;
4615 memset(f, 0, sizeof(*f));
4616 f->op = op;
4617 f->op2 = op2;
4619 /* Lookup the instruction. */
4620 info = lookup_opc(op << 8 | op2);
4622 /* If we found it, extract the operands. */
4623 if (info != NULL) {
4624 DisasFormat fmt = info->fmt;
4625 int i;
4627 for (i = 0; i < NUM_C_FIELD; ++i) {
4628 extract_field(f, &format_info[fmt].op[i], insn);
4631 return info;
4634 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4636 const DisasInsn *insn;
4637 ExitStatus ret = NO_EXIT;
4638 DisasFields f;
4639 DisasOps o;
4641 /* Search for the insn in the table. */
4642 insn = extract_insn(env, s, &f);
4644 /* Not found means unimplemented/illegal opcode. */
4645 if (insn == NULL) {
4646 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
4647 f.op, f.op2);
4648 gen_illegal_opcode(s);
4649 return EXIT_NORETURN;
4652 /* Check for insn specification exceptions. */
4653 if (insn->spec) {
4654 int spec = insn->spec, excp = 0, r;
4656 if (spec & SPEC_r1_even) {
4657 r = get_field(&f, r1);
4658 if (r & 1) {
4659 excp = PGM_SPECIFICATION;
4662 if (spec & SPEC_r2_even) {
4663 r = get_field(&f, r2);
4664 if (r & 1) {
4665 excp = PGM_SPECIFICATION;
4668 if (spec & SPEC_r3_even) {
4669 r = get_field(&f, r3);
4670 if (r & 1) {
4671 excp = PGM_SPECIFICATION;
4674 if (spec & SPEC_r1_f128) {
4675 r = get_field(&f, r1);
4676 if (r > 13) {
4677 excp = PGM_SPECIFICATION;
4680 if (spec & SPEC_r2_f128) {
4681 r = get_field(&f, r2);
4682 if (r > 13) {
4683 excp = PGM_SPECIFICATION;
4686 if (excp) {
4687 gen_program_exception(s, excp);
4688 return EXIT_NORETURN;
4692 /* Set up the strutures we use to communicate with the helpers. */
4693 s->insn = insn;
4694 s->fields = &f;
4695 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4696 TCGV_UNUSED_I64(o.out);
4697 TCGV_UNUSED_I64(o.out2);
4698 TCGV_UNUSED_I64(o.in1);
4699 TCGV_UNUSED_I64(o.in2);
4700 TCGV_UNUSED_I64(o.addr1);
4702 /* Implement the instruction. */
4703 if (insn->help_in1) {
4704 insn->help_in1(s, &f, &o);
4706 if (insn->help_in2) {
4707 insn->help_in2(s, &f, &o);
4709 if (insn->help_prep) {
4710 insn->help_prep(s, &f, &o);
4712 if (insn->help_op) {
4713 ret = insn->help_op(s, &o);
4715 if (insn->help_wout) {
4716 insn->help_wout(s, &f, &o);
4718 if (insn->help_cout) {
4719 insn->help_cout(s, &o);
4722 /* Free any temporaries created by the helpers. */
4723 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4724 tcg_temp_free_i64(o.out);
4726 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4727 tcg_temp_free_i64(o.out2);
4729 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4730 tcg_temp_free_i64(o.in1);
4732 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4733 tcg_temp_free_i64(o.in2);
4735 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4736 tcg_temp_free_i64(o.addr1);
4739 /* Advance to the next instruction. */
4740 s->pc = s->next_pc;
4741 return ret;
4744 static inline void gen_intermediate_code_internal(S390CPU *cpu,
4745 TranslationBlock *tb,
4746 bool search_pc)
4748 CPUState *cs = CPU(cpu);
4749 CPUS390XState *env = &cpu->env;
4750 DisasContext dc;
4751 target_ulong pc_start;
4752 uint64_t next_page_start;
4753 uint16_t *gen_opc_end;
4754 int j, lj = -1;
4755 int num_insns, max_insns;
4756 CPUBreakpoint *bp;
4757 ExitStatus status;
4758 bool do_debug;
4760 pc_start = tb->pc;
4762 /* 31-bit mode */
4763 if (!(tb->flags & FLAG_MASK_64)) {
4764 pc_start &= 0x7fffffff;
4767 dc.tb = tb;
4768 dc.pc = pc_start;
4769 dc.cc_op = CC_OP_DYNAMIC;
4770 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
4772 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4774 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4776 num_insns = 0;
4777 max_insns = tb->cflags & CF_COUNT_MASK;
4778 if (max_insns == 0) {
4779 max_insns = CF_COUNT_MASK;
4782 gen_tb_start();
4784 do {
4785 if (search_pc) {
4786 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4787 if (lj < j) {
4788 lj++;
4789 while (lj < j) {
4790 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4793 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4794 gen_opc_cc_op[lj] = dc.cc_op;
4795 tcg_ctx.gen_opc_instr_start[lj] = 1;
4796 tcg_ctx.gen_opc_icount[lj] = num_insns;
4798 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4799 gen_io_start();
4802 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4803 tcg_gen_debug_insn_start(dc.pc);
4806 status = NO_EXIT;
4807 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
4808 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
4809 if (bp->pc == dc.pc) {
4810 status = EXIT_PC_STALE;
4811 do_debug = true;
4812 break;
4816 if (status == NO_EXIT) {
4817 status = translate_one(env, &dc);
4820 /* If we reach a page boundary, are single stepping,
4821 or exhaust instruction count, stop generation. */
4822 if (status == NO_EXIT
4823 && (dc.pc >= next_page_start
4824 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4825 || num_insns >= max_insns
4826 || singlestep
4827 || cs->singlestep_enabled)) {
4828 status = EXIT_PC_STALE;
4830 } while (status == NO_EXIT);
4832 if (tb->cflags & CF_LAST_IO) {
4833 gen_io_end();
4836 switch (status) {
4837 case EXIT_GOTO_TB:
4838 case EXIT_NORETURN:
4839 break;
4840 case EXIT_PC_STALE:
4841 update_psw_addr(&dc);
4842 /* FALLTHRU */
4843 case EXIT_PC_UPDATED:
4844 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4845 cc op type is in env */
4846 update_cc_op(&dc);
4847 /* Exit the TB, either by raising a debug exception or by return. */
4848 if (do_debug) {
4849 gen_exception(EXCP_DEBUG);
4850 } else {
4851 tcg_gen_exit_tb(0);
4853 break;
4854 default:
4855 abort();
4858 gen_tb_end(tb, num_insns);
4859 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4860 if (search_pc) {
4861 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4862 lj++;
4863 while (lj <= j) {
4864 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4866 } else {
4867 tb->size = dc.pc - pc_start;
4868 tb->icount = num_insns;
4871 #if defined(S390X_DEBUG_DISAS)
4872 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4873 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4874 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4875 qemu_log("\n");
4877 #endif
4880 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4882 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
4885 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4887 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
4890 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4892 int cc_op;
4893 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4894 cc_op = gen_opc_cc_op[pc_pos];
4895 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4896 env->cc_op = cc_op;