dmg: prevent chunk buffer overflow (CVE-2014-0145)
[qemu.git] / target-s390x / translate.c
blob81b7e330abebbedc8ef4d4cf3df002d558a88abf
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
60 /* Information carried about a condition to be evaluated. */
61 typedef struct {
62 TCGCond cond:8;
63 bool is_64;
64 bool g1;
65 bool g2;
66 union {
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
69 } u;
70 } DisasCompare;
72 #define DISAS_EXCP 4
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
86 return pc;
/* Dump the architectural CPU state (PSW, GPRs, FPRs, and — in system
   mode — control registers) to stream F for debugging/monitor use.  */
89 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
92 S390CPU *cpu = S390_CPU(cs);
93 CPUS390XState *env = &cpu->env;
94 int i;
/* cc_op > 3 means the cc is still symbolic (an operation name), not a
   computed 0..3 value; print its name rather than a number.  */
96 if (env->cc_op > 3) {
97 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
98 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
99 } else {
100 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
101 env->psw.mask, env->psw.addr, env->cc_op);
/* General registers, four per output line.  */
104 for (i = 0; i < 16; i++) {
105 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
106 if ((i % 4) == 3) {
107 cpu_fprintf(f, "\n");
108 } else {
109 cpu_fprintf(f, " ");
/* Floating point registers, four per output line.  */
113 for (i = 0; i < 16; i++) {
114 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
115 if ((i % 4) == 3) {
116 cpu_fprintf(f, "\n");
117 } else {
118 cpu_fprintf(f, " ");
/* Control registers exist only in system emulation.  */
122 #ifndef CONFIG_USER_ONLY
123 for (i = 0; i < 16; i++) {
124 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
125 if ((i % 4) == 3) {
126 cpu_fprintf(f, "\n");
127 } else {
128 cpu_fprintf(f, " ");
131 #endif
/* Optional statistics for inline vs. helper-based branch translation.  */
133 #ifdef DEBUG_INLINE_BRANCHES
134 for (i = 0; i < CC_OP_MAX; i++) {
135 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
136 inline_branch_miss[i], inline_branch_hit[i]);
138 #endif
140 cpu_fprintf(f, "\n");
143 static TCGv_i64 psw_addr;
144 static TCGv_i64 psw_mask;
146 static TCGv_i32 cc_op;
147 static TCGv_i64 cc_src;
148 static TCGv_i64 cc_dst;
149 static TCGv_i64 cc_vr;
151 static char cpu_reg_names[32][4];
152 static TCGv_i64 regs[16];
153 static TCGv_i64 fregs[16];
155 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* One-time translator initialization: create the global TCG values that
   mirror the fields of CPUS390XState (PSW, cc tracking state, the 16
   general and 16 floating point registers).  Called once at startup.  */
157 void s390x_translate_init(void)
159 int i;
161 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
162 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
163 offsetof(CPUS390XState, psw.addr),
164 "psw_addr");
165 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
166 offsetof(CPUS390XState, psw.mask),
167 "psw_mask");
169 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
170 "cc_op");
171 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
172 "cc_src");
173 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
174 "cc_dst");
175 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
176 "cc_vr");
/* General registers r0..r15; names live in cpu_reg_names[0..15].  */
178 for (i = 0; i < 16; i++) {
179 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
180 regs[i] = tcg_global_mem_new(TCG_AREG0,
181 offsetof(CPUS390XState, regs[i]),
182 cpu_reg_names[i]);
/* Float registers f0..f15; names live in cpu_reg_names[16..31].  */
185 for (i = 0; i < 16; i++) {
186 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
187 fregs[i] = tcg_global_mem_new(TCG_AREG0,
188 offsetof(CPUS390XState, fregs[i].d),
189 cpu_reg_names[i + 16]);
193 static TCGv_i64 load_reg(int reg)
195 TCGv_i64 r = tcg_temp_new_i64();
196 tcg_gen_mov_i64(r, regs[reg]);
197 return r;
200 static TCGv_i64 load_freg32_i64(int reg)
202 TCGv_i64 r = tcg_temp_new_i64();
203 tcg_gen_shri_i64(r, fregs[reg], 32);
204 return r;
/* Store V into general register REG (full 64 bits).  */
207 static void store_reg(int reg, TCGv_i64 v)
209 tcg_gen_mov_i64(regs[reg], v);
/* Store V into float register REG (full 64 bits).  */
212 static void store_freg(int reg, TCGv_i64 v)
214 tcg_gen_mov_i64(fregs[reg], v);
217 static void store_reg32_i64(int reg, TCGv_i64 v)
219 /* 32 bit register writes keep the upper half */
220 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
/* Write the low 32 bits of V into the HIGH word of register REG,
   keeping the low word intact (high-word facility style access).  */
223 static void store_reg32h_i64(int reg, TCGv_i64 v)
225 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* 32-bit float stores go to the high half of the FPR; low half kept.  */
228 static void store_freg32_i64(int reg, TCGv_i64 v)
230 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
/* Fetch the low 64 bits of a 128-bit helper result from env->retxl.  */
233 static void return_low128(TCGv_i64 dest)
235 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
/* Write the current translation pc into the architectural psw.addr.  */
238 static void update_psw_addr(DisasContext *s)
240 /* psw.addr */
241 tcg_gen_movi_i64(psw_addr, s->pc);
/* Flush the symbolic cc_op into env->cc_op, unless it is already there
   (CC_OP_STATIC) or unknown at translation time (CC_OP_DYNAMIC).  */
244 static void update_cc_op(DisasContext *s)
246 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
247 tcg_gen_movi_i32(cc_op, s->cc_op);
/* Synchronize pc and cc state before an operation that may fault, so
   the exception path sees a consistent CPU state.  */
251 static void potential_page_fault(DisasContext *s)
253 update_psw_addr(s);
254 update_cc_op(s);
/* Fetch 2 instruction bytes at PC, zero-extended to 64 bits.  */
257 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
259 return (uint64_t)cpu_lduw_code(env, pc);
/* Fetch 4 instruction bytes at PC, zero-extended (the uint32_t cast
   strips any sign extension from cpu_ldl_code's int return).  */
262 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
264 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
/* Fetch a 6-byte instruction at PC into the high 48 bits of a u64:
   2-byte opcode halfword first, then the following 4 bytes.  */
267 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
269 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
/* Map the PSW address-space-control bits (cached in tb->flags) to the
   softmmu memory index: 0 primary, 1 secondary, 2 home space.  */
272 static int get_mem_index(DisasContext *s)
274 switch (s->tb->flags & FLAG_MASK_ASC) {
275 case PSW_ASC_PRIMARY >> 32:
276 return 0;
277 case PSW_ASC_SECONDARY >> 32:
278 return 1;
279 case PSW_ASC_HOME >> 32:
280 return 2;
/* Access-register mode is not handled here; anything else is a bug.  */
281 default:
282 tcg_abort();
283 break;
287 static void gen_exception(int excp)
289 TCGv_i32 tmp = tcg_const_i32(excp);
290 gen_helper_exception(cpu_env, tmp);
291 tcg_temp_free_i32(tmp);
/* Raise a program interruption with interruption code CODE: record the
   code and instruction length in env, advance the PSW past the faulting
   instruction, flush cc state, and call the exception helper.  */
294 static void gen_program_exception(DisasContext *s, int code)
296 TCGv_i32 tmp;
298 /* Remember what pgm exception this was. */
299 tmp = tcg_const_i32(code);
300 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
301 tcg_temp_free_i32(tmp);
/* The ilen field wants the instruction length in bytes.  */
303 tmp = tcg_const_i32(s->next_pc - s->pc);
304 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
305 tcg_temp_free_i32(tmp);
307 /* Advance past instruction. */
308 s->pc = s->next_pc;
309 update_psw_addr(s);
311 /* Save off cc. */
312 update_cc_op(s);
314 /* Trigger exception. */
315 gen_exception(EXCP_PGM);
318 static inline void gen_illegal_opcode(DisasContext *s)
320 gen_program_exception(s, PGM_SPECIFICATION);
/* If the CPU is in problem state (PSW P bit set, cached in tb->flags),
   raise a privileged-operation exception.  Called by the translators of
   privileged instructions before emitting their body.  */
323 static inline void check_privileged(DisasContext *s)
325 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
326 gen_program_exception(s, PGM_PRIVILEGED);
/* Compute the effective address base(b2) + index(x2) + displacement d2
   into a fresh temporary, masking to 31 bits when not in 64-bit mode.
   Register number 0 means "no base/index register" per the architecture.  */
330 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
332 TCGv_i64 tmp = tcg_temp_new_i64();
333 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
335 /* Note that d2 is limited to 20 bits, signed. If we crop negative
336 displacements early we create larger immediate addends. */
338 /* Note that addi optimizes the imm==0 case. */
339 if (b2 && x2) {
340 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
341 tcg_gen_addi_i64(tmp, tmp, d2);
342 } else if (b2) {
343 tcg_gen_addi_i64(tmp, regs[b2], d2);
344 } else if (x2) {
345 tcg_gen_addi_i64(tmp, regs[x2], d2);
346 } else {
/* Constant address: mask at translation time, skip the runtime AND.  */
347 if (need_31) {
348 d2 &= 0x7fffffff;
349 need_31 = false;
351 tcg_gen_movi_i64(tmp, d2);
/* 31-bit mode: wrap the computed address into the low 31 bits.  */
353 if (need_31) {
354 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
357 return tmp;
360 static inline bool live_cc_data(DisasContext *s)
362 return (s->cc_op != CC_OP_DYNAMIC
363 && s->cc_op != CC_OP_STATIC
364 && s->cc_op > 3);
/* Set the cc to the compile-time constant VAL (0..3); any stale cc data
   in the globals is dead and may be discarded.  */
367 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
369 if (live_cc_data(s)) {
370 tcg_gen_discard_i64(cc_src);
371 tcg_gen_discard_i64(cc_dst);
372 tcg_gen_discard_i64(cc_vr);
374 s->cc_op = CC_OP_CONST0 + val;
/* Record a one-operand cc computation OP over DST (kept in cc_dst).  */
377 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
379 if (live_cc_data(s)) {
380 tcg_gen_discard_i64(cc_src);
381 tcg_gen_discard_i64(cc_vr);
383 tcg_gen_mov_i64(cc_dst, dst);
384 s->cc_op = op;
/* Record a two-operand cc computation OP over SRC and DST.  */
387 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
388 TCGv_i64 dst)
390 if (live_cc_data(s)) {
391 tcg_gen_discard_i64(cc_vr);
393 tcg_gen_mov_i64(cc_src, src);
394 tcg_gen_mov_i64(cc_dst, dst);
395 s->cc_op = op;
/* Record a three-operand cc computation OP over SRC, DST and VR.  */
398 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
399 TCGv_i64 dst, TCGv_i64 vr)
401 tcg_gen_mov_i64(cc_src, src);
402 tcg_gen_mov_i64(cc_dst, dst);
403 tcg_gen_mov_i64(cc_vr, vr);
404 s->cc_op = op;
/* Convenience wrappers for the common nonzero-test cc ops.  */
407 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
409 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
412 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
414 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
417 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
419 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
/* 128-bit float nonzero test takes both halves via the 2-op form.  */
422 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
424 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
427 /* CC value is in env->cc_op */
428 static void set_cc_static(DisasContext *s)
430 if (live_cc_data(s)) {
431 tcg_gen_discard_i64(cc_src);
432 tcg_gen_discard_i64(cc_dst);
433 tcg_gen_discard_i64(cc_vr);
435 s->cc_op = CC_OP_STATIC;
438 /* calculates cc into cc_op */
439 static void gen_op_calc_cc(DisasContext *s)
441 TCGv_i32 local_cc_op;
442 TCGv_i64 dummy;
444 TCGV_UNUSED_I32(local_cc_op);
445 TCGV_UNUSED_I64(dummy);
446 switch (s->cc_op) {
447 default:
448 dummy = tcg_const_i64(0);
449 /* FALLTHRU */
450 case CC_OP_ADD_64:
451 case CC_OP_ADDU_64:
452 case CC_OP_ADDC_64:
453 case CC_OP_SUB_64:
454 case CC_OP_SUBU_64:
455 case CC_OP_SUBB_64:
456 case CC_OP_ADD_32:
457 case CC_OP_ADDU_32:
458 case CC_OP_ADDC_32:
459 case CC_OP_SUB_32:
460 case CC_OP_SUBU_32:
461 case CC_OP_SUBB_32:
462 local_cc_op = tcg_const_i32(s->cc_op);
463 break;
464 case CC_OP_CONST0:
465 case CC_OP_CONST1:
466 case CC_OP_CONST2:
467 case CC_OP_CONST3:
468 case CC_OP_STATIC:
469 case CC_OP_DYNAMIC:
470 break;
473 switch (s->cc_op) {
474 case CC_OP_CONST0:
475 case CC_OP_CONST1:
476 case CC_OP_CONST2:
477 case CC_OP_CONST3:
478 /* s->cc_op is the cc value */
479 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
480 break;
481 case CC_OP_STATIC:
482 /* env->cc_op already is the cc value */
483 break;
484 case CC_OP_NZ:
485 case CC_OP_ABS_64:
486 case CC_OP_NABS_64:
487 case CC_OP_ABS_32:
488 case CC_OP_NABS_32:
489 case CC_OP_LTGT0_32:
490 case CC_OP_LTGT0_64:
491 case CC_OP_COMP_32:
492 case CC_OP_COMP_64:
493 case CC_OP_NZ_F32:
494 case CC_OP_NZ_F64:
495 case CC_OP_FLOGR:
496 /* 1 argument */
497 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
498 break;
499 case CC_OP_ICM:
500 case CC_OP_LTGT_32:
501 case CC_OP_LTGT_64:
502 case CC_OP_LTUGTU_32:
503 case CC_OP_LTUGTU_64:
504 case CC_OP_TM_32:
505 case CC_OP_TM_64:
506 case CC_OP_SLA_32:
507 case CC_OP_SLA_64:
508 case CC_OP_NZ_F128:
509 /* 2 arguments */
510 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
511 break;
512 case CC_OP_ADD_64:
513 case CC_OP_ADDU_64:
514 case CC_OP_ADDC_64:
515 case CC_OP_SUB_64:
516 case CC_OP_SUBU_64:
517 case CC_OP_SUBB_64:
518 case CC_OP_ADD_32:
519 case CC_OP_ADDU_32:
520 case CC_OP_ADDC_32:
521 case CC_OP_SUB_32:
522 case CC_OP_SUBU_32:
523 case CC_OP_SUBB_32:
524 /* 3 arguments */
525 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
526 break;
527 case CC_OP_DYNAMIC:
528 /* unknown operation - assume 3 arguments and cc_op in env */
529 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
530 break;
531 default:
532 tcg_abort();
535 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
536 tcg_temp_free_i32(local_cc_op);
538 if (!TCGV_IS_UNUSED_I64(dummy)) {
539 tcg_temp_free_i64(dummy);
542 /* We now have cc in cc_op as constant */
543 set_cc_static(s);
546 static int use_goto_tb(DisasContext *s, uint64_t dest)
548 /* NOTE: we handle the case where the TB spans two pages here */
549 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
550 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
551 && !s->singlestep_enabled
552 && !(s->tb->cflags & CF_LAST_IO));
/* Statistics hook: count a branch translated via the cc helper (a
   "miss" for the inline fast path).  No-op unless DEBUG_INLINE_BRANCHES.  */
555 static void account_noninline_branch(DisasContext *s, int cc_op)
557 #ifdef DEBUG_INLINE_BRANCHES
558 inline_branch_miss[cc_op]++;
559 #endif
/* Statistics hook: count a branch translated to an inline comparison.  */
562 static void account_inline_branch(DisasContext *s, int cc_op)
564 #ifdef DEBUG_INLINE_BRANCHES
565 inline_branch_hit[cc_op]++;
566 #endif
569 /* Table of mask values to comparison codes, given a comparison as input.
570 For such, CC=3 should not be possible. */
571 static const TCGCond ltgt_cond[16] = {
572 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
573 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
574 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
575 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
576 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
577 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
578 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
579 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
582 /* Table of mask values to comparison codes, given a logic op as input.
583 For such, only CC=0 and CC=1 should be possible. */
584 static const TCGCond nz_cond[16] = {
585 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
586 TCG_COND_NEVER, TCG_COND_NEVER,
587 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
588 TCG_COND_NE, TCG_COND_NE,
589 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
590 TCG_COND_EQ, TCG_COND_EQ,
591 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
592 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
595 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
596 details required to generate a TCG comparison. */
597 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
599 TCGCond cond;
600 enum cc_op old_cc_op = s->cc_op;
602 if (mask == 15 || mask == 0) {
603 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
604 c->u.s32.a = cc_op;
605 c->u.s32.b = cc_op;
606 c->g1 = c->g2 = true;
607 c->is_64 = false;
608 return;
611 /* Find the TCG condition for the mask + cc op. */
612 switch (old_cc_op) {
613 case CC_OP_LTGT0_32:
614 case CC_OP_LTGT0_64:
615 case CC_OP_LTGT_32:
616 case CC_OP_LTGT_64:
617 cond = ltgt_cond[mask];
618 if (cond == TCG_COND_NEVER) {
619 goto do_dynamic;
621 account_inline_branch(s, old_cc_op);
622 break;
624 case CC_OP_LTUGTU_32:
625 case CC_OP_LTUGTU_64:
626 cond = tcg_unsigned_cond(ltgt_cond[mask]);
627 if (cond == TCG_COND_NEVER) {
628 goto do_dynamic;
630 account_inline_branch(s, old_cc_op);
631 break;
633 case CC_OP_NZ:
634 cond = nz_cond[mask];
635 if (cond == TCG_COND_NEVER) {
636 goto do_dynamic;
638 account_inline_branch(s, old_cc_op);
639 break;
641 case CC_OP_TM_32:
642 case CC_OP_TM_64:
643 switch (mask) {
644 case 8:
645 cond = TCG_COND_EQ;
646 break;
647 case 4 | 2 | 1:
648 cond = TCG_COND_NE;
649 break;
650 default:
651 goto do_dynamic;
653 account_inline_branch(s, old_cc_op);
654 break;
656 case CC_OP_ICM:
657 switch (mask) {
658 case 8:
659 cond = TCG_COND_EQ;
660 break;
661 case 4 | 2 | 1:
662 case 4 | 2:
663 cond = TCG_COND_NE;
664 break;
665 default:
666 goto do_dynamic;
668 account_inline_branch(s, old_cc_op);
669 break;
671 case CC_OP_FLOGR:
672 switch (mask & 0xa) {
673 case 8: /* src == 0 -> no one bit found */
674 cond = TCG_COND_EQ;
675 break;
676 case 2: /* src != 0 -> one bit found */
677 cond = TCG_COND_NE;
678 break;
679 default:
680 goto do_dynamic;
682 account_inline_branch(s, old_cc_op);
683 break;
685 case CC_OP_ADDU_32:
686 case CC_OP_ADDU_64:
687 switch (mask) {
688 case 8 | 2: /* vr == 0 */
689 cond = TCG_COND_EQ;
690 break;
691 case 4 | 1: /* vr != 0 */
692 cond = TCG_COND_NE;
693 break;
694 case 8 | 4: /* no carry -> vr >= src */
695 cond = TCG_COND_GEU;
696 break;
697 case 2 | 1: /* carry -> vr < src */
698 cond = TCG_COND_LTU;
699 break;
700 default:
701 goto do_dynamic;
703 account_inline_branch(s, old_cc_op);
704 break;
706 case CC_OP_SUBU_32:
707 case CC_OP_SUBU_64:
708 /* Note that CC=0 is impossible; treat it as dont-care. */
709 switch (mask & 7) {
710 case 2: /* zero -> op1 == op2 */
711 cond = TCG_COND_EQ;
712 break;
713 case 4 | 1: /* !zero -> op1 != op2 */
714 cond = TCG_COND_NE;
715 break;
716 case 4: /* borrow (!carry) -> op1 < op2 */
717 cond = TCG_COND_LTU;
718 break;
719 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
720 cond = TCG_COND_GEU;
721 break;
722 default:
723 goto do_dynamic;
725 account_inline_branch(s, old_cc_op);
726 break;
728 default:
729 do_dynamic:
730 /* Calculate cc value. */
731 gen_op_calc_cc(s);
732 /* FALLTHRU */
734 case CC_OP_STATIC:
735 /* Jump based on CC. We'll load up the real cond below;
736 the assignment here merely avoids a compiler warning. */
737 account_noninline_branch(s, old_cc_op);
738 old_cc_op = CC_OP_STATIC;
739 cond = TCG_COND_NEVER;
740 break;
743 /* Load up the arguments of the comparison. */
744 c->is_64 = true;
745 c->g1 = c->g2 = false;
746 switch (old_cc_op) {
747 case CC_OP_LTGT0_32:
748 c->is_64 = false;
749 c->u.s32.a = tcg_temp_new_i32();
750 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
751 c->u.s32.b = tcg_const_i32(0);
752 break;
753 case CC_OP_LTGT_32:
754 case CC_OP_LTUGTU_32:
755 case CC_OP_SUBU_32:
756 c->is_64 = false;
757 c->u.s32.a = tcg_temp_new_i32();
758 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
759 c->u.s32.b = tcg_temp_new_i32();
760 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
761 break;
763 case CC_OP_LTGT0_64:
764 case CC_OP_NZ:
765 case CC_OP_FLOGR:
766 c->u.s64.a = cc_dst;
767 c->u.s64.b = tcg_const_i64(0);
768 c->g1 = true;
769 break;
770 case CC_OP_LTGT_64:
771 case CC_OP_LTUGTU_64:
772 case CC_OP_SUBU_64:
773 c->u.s64.a = cc_src;
774 c->u.s64.b = cc_dst;
775 c->g1 = c->g2 = true;
776 break;
778 case CC_OP_TM_32:
779 case CC_OP_TM_64:
780 case CC_OP_ICM:
781 c->u.s64.a = tcg_temp_new_i64();
782 c->u.s64.b = tcg_const_i64(0);
783 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
784 break;
786 case CC_OP_ADDU_32:
787 c->is_64 = false;
788 c->u.s32.a = tcg_temp_new_i32();
789 c->u.s32.b = tcg_temp_new_i32();
790 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
791 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
792 tcg_gen_movi_i32(c->u.s32.b, 0);
793 } else {
794 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
796 break;
798 case CC_OP_ADDU_64:
799 c->u.s64.a = cc_vr;
800 c->g1 = true;
801 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
802 c->u.s64.b = tcg_const_i64(0);
803 } else {
804 c->u.s64.b = cc_src;
805 c->g2 = true;
807 break;
809 case CC_OP_STATIC:
810 c->is_64 = false;
811 c->u.s32.a = cc_op;
812 c->g1 = true;
813 switch (mask) {
814 case 0x8 | 0x4 | 0x2: /* cc != 3 */
815 cond = TCG_COND_NE;
816 c->u.s32.b = tcg_const_i32(3);
817 break;
818 case 0x8 | 0x4 | 0x1: /* cc != 2 */
819 cond = TCG_COND_NE;
820 c->u.s32.b = tcg_const_i32(2);
821 break;
822 case 0x8 | 0x2 | 0x1: /* cc != 1 */
823 cond = TCG_COND_NE;
824 c->u.s32.b = tcg_const_i32(1);
825 break;
826 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
827 cond = TCG_COND_EQ;
828 c->g1 = false;
829 c->u.s32.a = tcg_temp_new_i32();
830 c->u.s32.b = tcg_const_i32(0);
831 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
832 break;
833 case 0x8 | 0x4: /* cc < 2 */
834 cond = TCG_COND_LTU;
835 c->u.s32.b = tcg_const_i32(2);
836 break;
837 case 0x8: /* cc == 0 */
838 cond = TCG_COND_EQ;
839 c->u.s32.b = tcg_const_i32(0);
840 break;
841 case 0x4 | 0x2 | 0x1: /* cc != 0 */
842 cond = TCG_COND_NE;
843 c->u.s32.b = tcg_const_i32(0);
844 break;
845 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
846 cond = TCG_COND_NE;
847 c->g1 = false;
848 c->u.s32.a = tcg_temp_new_i32();
849 c->u.s32.b = tcg_const_i32(0);
850 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
851 break;
852 case 0x4: /* cc == 1 */
853 cond = TCG_COND_EQ;
854 c->u.s32.b = tcg_const_i32(1);
855 break;
856 case 0x2 | 0x1: /* cc > 1 */
857 cond = TCG_COND_GTU;
858 c->u.s32.b = tcg_const_i32(1);
859 break;
860 case 0x2: /* cc == 2 */
861 cond = TCG_COND_EQ;
862 c->u.s32.b = tcg_const_i32(2);
863 break;
864 case 0x1: /* cc == 3 */
865 cond = TCG_COND_EQ;
866 c->u.s32.b = tcg_const_i32(3);
867 break;
868 default:
869 /* CC is masked by something else: (8 >> cc) & mask. */
870 cond = TCG_COND_NE;
871 c->g1 = false;
872 c->u.s32.a = tcg_const_i32(8);
873 c->u.s32.b = tcg_const_i32(0);
874 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
875 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
876 break;
878 break;
880 default:
881 abort();
883 c->cond = cond;
886 static void free_compare(DisasCompare *c)
888 if (!c->g1) {
889 if (c->is_64) {
890 tcg_temp_free_i64(c->u.s64.a);
891 } else {
892 tcg_temp_free_i32(c->u.s32.a);
895 if (!c->g2) {
896 if (c->is_64) {
897 tcg_temp_free_i64(c->u.s64.b);
898 } else {
899 tcg_temp_free_i32(c->u.s32.b);
904 /* ====================================================================== */
905 /* Define the insn format enumeration. */
906 #define F0(N) FMT_##N,
907 #define F1(N, X1) F0(N)
908 #define F2(N, X1, X2) F0(N)
909 #define F3(N, X1, X2, X3) F0(N)
910 #define F4(N, X1, X2, X3, X4) F0(N)
911 #define F5(N, X1, X2, X3, X4, X5) F0(N)
913 typedef enum {
914 #include "insn-format.def"
915 } DisasFormat;
917 #undef F0
918 #undef F1
919 #undef F2
920 #undef F3
921 #undef F4
922 #undef F5
924 /* Define a structure to hold the decoded fields. We'll store each inside
925 an array indexed by an enum. In order to conserve memory, we'll arrange
926 for fields that do not exist at the same time to overlap, thus the "C"
927 for compact. For checking purposes there is an "O" for original index
928 as well that will be applied to availability bitmaps. */
930 enum DisasFieldIndexO {
931 FLD_O_r1,
932 FLD_O_r2,
933 FLD_O_r3,
934 FLD_O_m1,
935 FLD_O_m3,
936 FLD_O_m4,
937 FLD_O_b1,
938 FLD_O_b2,
939 FLD_O_b4,
940 FLD_O_d1,
941 FLD_O_d2,
942 FLD_O_d4,
943 FLD_O_x2,
944 FLD_O_l1,
945 FLD_O_l2,
946 FLD_O_i1,
947 FLD_O_i2,
948 FLD_O_i3,
949 FLD_O_i4,
950 FLD_O_i5
953 enum DisasFieldIndexC {
954 FLD_C_r1 = 0,
955 FLD_C_m1 = 0,
956 FLD_C_b1 = 0,
957 FLD_C_i1 = 0,
959 FLD_C_r2 = 1,
960 FLD_C_b2 = 1,
961 FLD_C_i2 = 1,
963 FLD_C_r3 = 2,
964 FLD_C_m3 = 2,
965 FLD_C_i3 = 2,
967 FLD_C_m4 = 3,
968 FLD_C_b4 = 3,
969 FLD_C_i4 = 3,
970 FLD_C_l1 = 3,
972 FLD_C_i5 = 4,
973 FLD_C_d1 = 4,
975 FLD_C_d2 = 5,
977 FLD_C_d4 = 6,
978 FLD_C_x2 = 6,
979 FLD_C_l2 = 6,
981 NUM_C_FIELD = 7
984 struct DisasFields {
985 unsigned op:8;
986 unsigned op2:8;
987 unsigned presentC:16;
988 unsigned int presentO;
989 int c[NUM_C_FIELD];
992 /* This is the way fields are to be accessed out of DisasFields. */
993 #define have_field(S, F) have_field1((S), FLD_O_##F)
994 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
996 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
998 return (f->presentO >> c) & 1;
1001 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1002 enum DisasFieldIndexC c)
1004 assert(have_field1(f, o));
1005 return f->c[c];
1008 /* Describe the layout of each field in each format. */
1009 typedef struct DisasField {
1010 unsigned int beg:8;
1011 unsigned int size:8;
1012 unsigned int type:2;
1013 unsigned int indexC:6;
1014 enum DisasFieldIndexO indexO:8;
1015 } DisasField;
1017 typedef struct DisasFormatInfo {
1018 DisasField op[NUM_C_FIELD];
1019 } DisasFormatInfo;
1021 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1022 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1023 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1024 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1025 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1027 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1028 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1029 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1030 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1031 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1032 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1033 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1034 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1036 #define F0(N) { { } },
1037 #define F1(N, X1) { { X1 } },
1038 #define F2(N, X1, X2) { { X1, X2 } },
1039 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1040 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1041 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1043 static const DisasFormatInfo format_info[] = {
1044 #include "insn-format.def"
1047 #undef F0
1048 #undef F1
1049 #undef F2
1050 #undef F3
1051 #undef F4
1052 #undef F5
1053 #undef R
1054 #undef M
1055 #undef BD
1056 #undef BXD
1057 #undef BDL
1058 #undef BXDL
1059 #undef I
1060 #undef L
1062 /* Generally, we'll extract operands into this structures, operate upon
1063 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1064 of routines below for more details. */
1065 typedef struct {
1066 bool g_out, g_out2, g_in1, g_in2;
1067 TCGv_i64 out, out2, in1, in2;
1068 TCGv_i64 addr1;
1069 } DisasOps;
1071 /* Instructions can place constraints on their operands, raising specification
1072 exceptions if they are violated. To make this easy to automate, each "in1",
1073 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1074 of the following, or 0. To make this easy to document, we'll put the
1075 SPEC_<name> defines next to <name>. */
1077 #define SPEC_r1_even 1
1078 #define SPEC_r2_even 2
1079 #define SPEC_r3_even 4
1080 #define SPEC_r1_f128 8
1081 #define SPEC_r2_f128 16
1083 /* Return values from translate_one, indicating the state of the TB. */
1084 typedef enum {
1085 /* Continue the TB. */
1086 NO_EXIT,
1087 /* We have emitted one or more goto_tb. No fixup required. */
1088 EXIT_GOTO_TB,
1089 /* We are not using a goto_tb (for whatever reason), but have updated
1090 the PC (for whatever reason), so there's no need to do it again on
1091 exiting the TB. */
1092 EXIT_PC_UPDATED,
1093 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1094 updated the PC for the next instruction to be executed. */
1095 EXIT_PC_STALE,
1096 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1097 No following code will be executed. */
1098 EXIT_NORETURN,
1099 } ExitStatus;
1101 typedef enum DisasFacility {
1102 FAC_Z, /* zarch (default) */
1103 FAC_CASS, /* compare and swap and store */
1104 FAC_CASS2, /* compare and swap and store 2*/
1105 FAC_DFP, /* decimal floating point */
1106 FAC_DFPR, /* decimal floating point rounding */
1107 FAC_DO, /* distinct operands */
1108 FAC_EE, /* execute extensions */
1109 FAC_EI, /* extended immediate */
1110 FAC_FPE, /* floating point extension */
1111 FAC_FPSSH, /* floating point support sign handling */
1112 FAC_FPRGR, /* FPR-GR transfer */
1113 FAC_GIE, /* general instructions extension */
1114 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1115 FAC_HW, /* high-word */
1116 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1117 FAC_LOC, /* load/store on condition */
1118 FAC_LD, /* long displacement */
1119 FAC_PC, /* population count */
1120 FAC_SCF, /* store clock fast */
1121 FAC_SFLE, /* store facility list extended */
1122 } DisasFacility;
1124 struct DisasInsn {
1125 unsigned opc:16;
1126 DisasFormat fmt:8;
1127 DisasFacility fac:8;
1128 unsigned spec:8;
1130 const char *name;
1132 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1133 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1134 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1135 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1136 void (*help_cout)(DisasContext *, DisasOps *);
1137 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1139 uint64_t data;
1142 /* ====================================================================== */
1143 /* Miscellaneous helpers, used by several operations. */
/* Load the shift count for shift instructions into o->in2: the b2/d2
   effective address reduced by MASK (0x3f for 64-bit shifts, 0x1f for
   32-bit ones).  With no base register the count is a constant.  */
1145 static void help_l2_shift(DisasContext *s, DisasFields *f,
1146 DisasOps *o, int mask)
1148 int b2 = get_field(f, b2);
1149 int d2 = get_field(f, d2);
1151 if (b2 == 0) {
1152 o->in2 = tcg_const_i64(d2 & mask);
1153 } else {
1154 o->in2 = get_address(s, 0, b2, d2);
1155 tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Emit an unconditional branch to DEST.  A branch to the fallthrough
   address is a no-op; otherwise use a chained goto_tb when permitted,
   or fall back to updating psw_addr and exiting the TB.  */
1159 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1161 if (dest == s->next_pc) {
1162 return NO_EXIT;
1164 if (use_goto_tb(s, dest)) {
1165 update_cc_op(s);
1166 tcg_gen_goto_tb(0);
1167 tcg_gen_movi_i64(psw_addr, dest);
1168 tcg_gen_exit_tb((uintptr_t)s->tb);
1169 return EXIT_GOTO_TB;
1170 } else {
1171 tcg_gen_movi_i64(psw_addr, dest);
1172 return EXIT_PC_UPDATED;
1176 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1177 bool is_imm, int imm, TCGv_i64 cdest)
1179 ExitStatus ret;
1180 uint64_t dest = s->pc + 2 * imm;
1181 int lab;
1183 /* Take care of the special cases first. */
1184 if (c->cond == TCG_COND_NEVER) {
1185 ret = NO_EXIT;
1186 goto egress;
1188 if (is_imm) {
1189 if (dest == s->next_pc) {
1190 /* Branch to next. */
1191 ret = NO_EXIT;
1192 goto egress;
1194 if (c->cond == TCG_COND_ALWAYS) {
1195 ret = help_goto_direct(s, dest);
1196 goto egress;
1198 } else {
1199 if (TCGV_IS_UNUSED_I64(cdest)) {
1200 /* E.g. bcr %r0 -> no branch. */
1201 ret = NO_EXIT;
1202 goto egress;
1204 if (c->cond == TCG_COND_ALWAYS) {
1205 tcg_gen_mov_i64(psw_addr, cdest);
1206 ret = EXIT_PC_UPDATED;
1207 goto egress;
1211 if (use_goto_tb(s, s->next_pc)) {
1212 if (is_imm && use_goto_tb(s, dest)) {
1213 /* Both exits can use goto_tb. */
1214 update_cc_op(s);
1216 lab = gen_new_label();
1217 if (c->is_64) {
1218 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1219 } else {
1220 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1223 /* Branch not taken. */
1224 tcg_gen_goto_tb(0);
1225 tcg_gen_movi_i64(psw_addr, s->next_pc);
1226 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1228 /* Branch taken. */
1229 gen_set_label(lab);
1230 tcg_gen_goto_tb(1);
1231 tcg_gen_movi_i64(psw_addr, dest);
1232 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1234 ret = EXIT_GOTO_TB;
1235 } else {
1236 /* Fallthru can use goto_tb, but taken branch cannot. */
1237 /* Store taken branch destination before the brcond. This
1238 avoids having to allocate a new local temp to hold it.
1239 We'll overwrite this in the not taken case anyway. */
1240 if (!is_imm) {
1241 tcg_gen_mov_i64(psw_addr, cdest);
1244 lab = gen_new_label();
1245 if (c->is_64) {
1246 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1247 } else {
1248 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1251 /* Branch not taken. */
1252 update_cc_op(s);
1253 tcg_gen_goto_tb(0);
1254 tcg_gen_movi_i64(psw_addr, s->next_pc);
1255 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1257 gen_set_label(lab);
1258 if (is_imm) {
1259 tcg_gen_movi_i64(psw_addr, dest);
1261 ret = EXIT_PC_UPDATED;
1263 } else {
1264 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1265 Most commonly we're single-stepping or some other condition that
1266 disables all use of goto_tb. Just update the PC and exit. */
1268 TCGv_i64 next = tcg_const_i64(s->next_pc);
1269 if (is_imm) {
1270 cdest = tcg_const_i64(dest);
1273 if (c->is_64) {
1274 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1275 cdest, next);
1276 } else {
1277 TCGv_i32 t0 = tcg_temp_new_i32();
1278 TCGv_i64 t1 = tcg_temp_new_i64();
1279 TCGv_i64 z = tcg_const_i64(0);
1280 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1281 tcg_gen_extu_i32_i64(t1, t0);
1282 tcg_temp_free_i32(t0);
1283 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1284 tcg_temp_free_i64(t1);
1285 tcg_temp_free_i64(z);
1288 if (is_imm) {
1289 tcg_temp_free_i64(cdest);
1291 tcg_temp_free_i64(next);
1293 ret = EXIT_PC_UPDATED;
1296 egress:
1297 free_compare(c);
1298 return ret;
1301 /* ====================================================================== */
1302 /* The operations. These perform the bulk of the work for any insn,
1303 usually after the operands have been loaded and output initialized. */
1305 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1307 gen_helper_abs_i64(o->out, o->in2);
1308 return NO_EXIT;
1311 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1313 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1314 return NO_EXIT;
1317 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1319 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1320 return NO_EXIT;
1323 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1325 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1326 tcg_gen_mov_i64(o->out2, o->in2);
1327 return NO_EXIT;
1330 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1332 tcg_gen_add_i64(o->out, o->in1, o->in2);
1333 return NO_EXIT;
1336 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1338 DisasCompare cmp;
1339 TCGv_i64 carry;
1341 tcg_gen_add_i64(o->out, o->in1, o->in2);
1343 /* The carry flag is the msb of CC, therefore the branch mask that would
1344 create that comparison is 3. Feeding the generated comparison to
1345 setcond produces the carry flag that we desire. */
1346 disas_jcc(s, &cmp, 3);
1347 carry = tcg_temp_new_i64();
1348 if (cmp.is_64) {
1349 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1350 } else {
1351 TCGv_i32 t = tcg_temp_new_i32();
1352 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1353 tcg_gen_extu_i32_i64(carry, t);
1354 tcg_temp_free_i32(t);
1356 free_compare(&cmp);
1358 tcg_gen_add_i64(o->out, o->out, carry);
1359 tcg_temp_free_i64(carry);
1360 return NO_EXIT;
1363 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1365 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1366 return NO_EXIT;
1369 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1371 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1372 return NO_EXIT;
1375 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1377 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1378 return_low128(o->out2);
1379 return NO_EXIT;
1382 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1384 tcg_gen_and_i64(o->out, o->in1, o->in2);
1385 return NO_EXIT;
1388 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1390 int shift = s->insn->data & 0xff;
1391 int size = s->insn->data >> 8;
1392 uint64_t mask = ((1ull << size) - 1) << shift;
1394 assert(!o->g_in2);
1395 tcg_gen_shli_i64(o->in2, o->in2, shift);
1396 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1397 tcg_gen_and_i64(o->out, o->in1, o->in2);
1399 /* Produce the CC from only the bits manipulated. */
1400 tcg_gen_andi_i64(cc_dst, o->out, mask);
1401 set_cc_nz_u64(s, cc_dst);
1402 return NO_EXIT;
1405 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1407 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1408 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1409 tcg_gen_mov_i64(psw_addr, o->in2);
1410 return EXIT_PC_UPDATED;
1411 } else {
1412 return NO_EXIT;
1416 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1418 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1419 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1422 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1424 int m1 = get_field(s->fields, m1);
1425 bool is_imm = have_field(s->fields, i2);
1426 int imm = is_imm ? get_field(s->fields, i2) : 0;
1427 DisasCompare c;
1429 disas_jcc(s, &c, m1);
1430 return help_branch(s, &c, is_imm, imm, o->in2);
1433 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1435 int r1 = get_field(s->fields, r1);
1436 bool is_imm = have_field(s->fields, i2);
1437 int imm = is_imm ? get_field(s->fields, i2) : 0;
1438 DisasCompare c;
1439 TCGv_i64 t;
1441 c.cond = TCG_COND_NE;
1442 c.is_64 = false;
1443 c.g1 = false;
1444 c.g2 = false;
1446 t = tcg_temp_new_i64();
1447 tcg_gen_subi_i64(t, regs[r1], 1);
1448 store_reg32_i64(r1, t);
1449 c.u.s32.a = tcg_temp_new_i32();
1450 c.u.s32.b = tcg_const_i32(0);
1451 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1452 tcg_temp_free_i64(t);
1454 return help_branch(s, &c, is_imm, imm, o->in2);
1457 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1459 int r1 = get_field(s->fields, r1);
1460 bool is_imm = have_field(s->fields, i2);
1461 int imm = is_imm ? get_field(s->fields, i2) : 0;
1462 DisasCompare c;
1464 c.cond = TCG_COND_NE;
1465 c.is_64 = true;
1466 c.g1 = true;
1467 c.g2 = false;
1469 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1470 c.u.s64.a = regs[r1];
1471 c.u.s64.b = tcg_const_i64(0);
1473 return help_branch(s, &c, is_imm, imm, o->in2);
1476 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1478 int r1 = get_field(s->fields, r1);
1479 int r3 = get_field(s->fields, r3);
1480 bool is_imm = have_field(s->fields, i2);
1481 int imm = is_imm ? get_field(s->fields, i2) : 0;
1482 DisasCompare c;
1483 TCGv_i64 t;
1485 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1486 c.is_64 = false;
1487 c.g1 = false;
1488 c.g2 = false;
1490 t = tcg_temp_new_i64();
1491 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1492 c.u.s32.a = tcg_temp_new_i32();
1493 c.u.s32.b = tcg_temp_new_i32();
1494 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1495 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1496 store_reg32_i64(r1, t);
1497 tcg_temp_free_i64(t);
1499 return help_branch(s, &c, is_imm, imm, o->in2);
1502 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1504 int r1 = get_field(s->fields, r1);
1505 int r3 = get_field(s->fields, r3);
1506 bool is_imm = have_field(s->fields, i2);
1507 int imm = is_imm ? get_field(s->fields, i2) : 0;
1508 DisasCompare c;
1510 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1511 c.is_64 = true;
1513 if (r1 == (r3 | 1)) {
1514 c.u.s64.b = load_reg(r3 | 1);
1515 c.g2 = false;
1516 } else {
1517 c.u.s64.b = regs[r3 | 1];
1518 c.g2 = true;
1521 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1522 c.u.s64.a = regs[r1];
1523 c.g1 = true;
1525 return help_branch(s, &c, is_imm, imm, o->in2);
1528 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1530 int imm, m3 = get_field(s->fields, m3);
1531 bool is_imm;
1532 DisasCompare c;
1534 c.cond = ltgt_cond[m3];
1535 if (s->insn->data) {
1536 c.cond = tcg_unsigned_cond(c.cond);
1538 c.is_64 = c.g1 = c.g2 = true;
1539 c.u.s64.a = o->in1;
1540 c.u.s64.b = o->in2;
1542 is_imm = have_field(s->fields, i4);
1543 if (is_imm) {
1544 imm = get_field(s->fields, i4);
1545 } else {
1546 imm = 0;
1547 o->out = get_address(s, 0, get_field(s->fields, b4),
1548 get_field(s->fields, d4));
1551 return help_branch(s, &c, is_imm, imm, o->out);
1554 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1556 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1557 set_cc_static(s);
1558 return NO_EXIT;
1561 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1563 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1564 set_cc_static(s);
1565 return NO_EXIT;
1568 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1570 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1571 set_cc_static(s);
1572 return NO_EXIT;
1575 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1577 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1578 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1579 tcg_temp_free_i32(m3);
1580 gen_set_cc_nz_f32(s, o->in2);
1581 return NO_EXIT;
1584 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1586 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1587 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1588 tcg_temp_free_i32(m3);
1589 gen_set_cc_nz_f64(s, o->in2);
1590 return NO_EXIT;
1593 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1595 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1596 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1597 tcg_temp_free_i32(m3);
1598 gen_set_cc_nz_f128(s, o->in1, o->in2);
1599 return NO_EXIT;
1602 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1604 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1605 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1606 tcg_temp_free_i32(m3);
1607 gen_set_cc_nz_f32(s, o->in2);
1608 return NO_EXIT;
1611 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1613 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1614 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1615 tcg_temp_free_i32(m3);
1616 gen_set_cc_nz_f64(s, o->in2);
1617 return NO_EXIT;
1620 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1622 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1623 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1624 tcg_temp_free_i32(m3);
1625 gen_set_cc_nz_f128(s, o->in1, o->in2);
1626 return NO_EXIT;
1629 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1631 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1632 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1633 tcg_temp_free_i32(m3);
1634 gen_set_cc_nz_f32(s, o->in2);
1635 return NO_EXIT;
1638 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1640 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1641 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1642 tcg_temp_free_i32(m3);
1643 gen_set_cc_nz_f64(s, o->in2);
1644 return NO_EXIT;
1647 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1649 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1650 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1651 tcg_temp_free_i32(m3);
1652 gen_set_cc_nz_f128(s, o->in1, o->in2);
1653 return NO_EXIT;
1656 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1658 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1659 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1660 tcg_temp_free_i32(m3);
1661 gen_set_cc_nz_f32(s, o->in2);
1662 return NO_EXIT;
1665 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1667 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1668 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1669 tcg_temp_free_i32(m3);
1670 gen_set_cc_nz_f64(s, o->in2);
1671 return NO_EXIT;
1674 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1676 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1677 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1678 tcg_temp_free_i32(m3);
1679 gen_set_cc_nz_f128(s, o->in1, o->in2);
1680 return NO_EXIT;
1683 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1685 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1686 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1687 tcg_temp_free_i32(m3);
1688 return NO_EXIT;
1691 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1693 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1694 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1695 tcg_temp_free_i32(m3);
1696 return NO_EXIT;
1699 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1701 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1702 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1703 tcg_temp_free_i32(m3);
1704 return_low128(o->out2);
1705 return NO_EXIT;
1708 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1710 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1711 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1712 tcg_temp_free_i32(m3);
1713 return NO_EXIT;
1716 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1718 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1719 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1720 tcg_temp_free_i32(m3);
1721 return NO_EXIT;
1724 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1726 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1727 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1728 tcg_temp_free_i32(m3);
1729 return_low128(o->out2);
1730 return NO_EXIT;
1733 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1735 int r2 = get_field(s->fields, r2);
1736 TCGv_i64 len = tcg_temp_new_i64();
1738 potential_page_fault(s);
1739 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1740 set_cc_static(s);
1741 return_low128(o->out);
1743 tcg_gen_add_i64(regs[r2], regs[r2], len);
1744 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1745 tcg_temp_free_i64(len);
1747 return NO_EXIT;
1750 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1752 int l = get_field(s->fields, l1);
1753 TCGv_i32 vl;
1755 switch (l + 1) {
1756 case 1:
1757 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1758 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1759 break;
1760 case 2:
1761 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1762 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1763 break;
1764 case 4:
1765 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1766 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1767 break;
1768 case 8:
1769 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1770 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1771 break;
1772 default:
1773 potential_page_fault(s);
1774 vl = tcg_const_i32(l);
1775 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1776 tcg_temp_free_i32(vl);
1777 set_cc_static(s);
1778 return NO_EXIT;
1780 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1781 return NO_EXIT;
1784 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1786 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1787 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1788 potential_page_fault(s);
1789 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1790 tcg_temp_free_i32(r1);
1791 tcg_temp_free_i32(r3);
1792 set_cc_static(s);
1793 return NO_EXIT;
1796 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1798 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1799 TCGv_i32 t1 = tcg_temp_new_i32();
1800 tcg_gen_trunc_i64_i32(t1, o->in1);
1801 potential_page_fault(s);
1802 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1803 set_cc_static(s);
1804 tcg_temp_free_i32(t1);
1805 tcg_temp_free_i32(m3);
1806 return NO_EXIT;
1809 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1811 potential_page_fault(s);
1812 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1813 set_cc_static(s);
1814 return_low128(o->in2);
1815 return NO_EXIT;
1818 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1820 TCGv_i64 t = tcg_temp_new_i64();
1821 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1822 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1823 tcg_gen_or_i64(o->out, o->out, t);
1824 tcg_temp_free_i64(t);
1825 return NO_EXIT;
1828 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1830 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1831 int d2 = get_field(s->fields, d2);
1832 int b2 = get_field(s->fields, b2);
1833 int is_64 = s->insn->data;
1834 TCGv_i64 addr, mem, cc, z;
1836 /* Note that in1 = R3 (new value) and
1837 in2 = (zero-extended) R1 (expected value). */
1839 /* Load the memory into the (temporary) output. While the PoO only talks
1840 about moving the memory to R1 on inequality, if we include equality it
1841 means that R1 is equal to the memory in all conditions. */
1842 addr = get_address(s, 0, b2, d2);
1843 if (is_64) {
1844 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1845 } else {
1846 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1849 /* Are the memory and expected values (un)equal? Note that this setcond
1850 produces the output CC value, thus the NE sense of the test. */
1851 cc = tcg_temp_new_i64();
1852 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1854 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1855 Recall that we are allowed to unconditionally issue the store (and
1856 thus any possible write trap), so (re-)store the original contents
1857 of MEM in case of inequality. */
1858 z = tcg_const_i64(0);
1859 mem = tcg_temp_new_i64();
1860 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1861 if (is_64) {
1862 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1863 } else {
1864 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1866 tcg_temp_free_i64(z);
1867 tcg_temp_free_i64(mem);
1868 tcg_temp_free_i64(addr);
1870 /* Store CC back to cc_op. Wait until after the store so that any
1871 exception gets the old cc_op value. */
1872 tcg_gen_trunc_i64_i32(cc_op, cc);
1873 tcg_temp_free_i64(cc);
1874 set_cc_static(s);
1875 return NO_EXIT;
1878 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1880 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1881 int r1 = get_field(s->fields, r1);
1882 int r3 = get_field(s->fields, r3);
1883 int d2 = get_field(s->fields, d2);
1884 int b2 = get_field(s->fields, b2);
1885 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1887 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1889 addrh = get_address(s, 0, b2, d2);
1890 addrl = get_address(s, 0, b2, d2 + 8);
1891 outh = tcg_temp_new_i64();
1892 outl = tcg_temp_new_i64();
1894 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
1895 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
1897 /* Fold the double-word compare with arithmetic. */
1898 cc = tcg_temp_new_i64();
1899 z = tcg_temp_new_i64();
1900 tcg_gen_xor_i64(cc, outh, regs[r1]);
1901 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
1902 tcg_gen_or_i64(cc, cc, z);
1903 tcg_gen_movi_i64(z, 0);
1904 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
1906 memh = tcg_temp_new_i64();
1907 meml = tcg_temp_new_i64();
1908 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
1909 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
1910 tcg_temp_free_i64(z);
1912 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
1913 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
1914 tcg_temp_free_i64(memh);
1915 tcg_temp_free_i64(meml);
1916 tcg_temp_free_i64(addrh);
1917 tcg_temp_free_i64(addrl);
1919 /* Save back state now that we've passed all exceptions. */
1920 tcg_gen_mov_i64(regs[r1], outh);
1921 tcg_gen_mov_i64(regs[r1 + 1], outl);
1922 tcg_gen_trunc_i64_i32(cc_op, cc);
1923 tcg_temp_free_i64(outh);
1924 tcg_temp_free_i64(outl);
1925 tcg_temp_free_i64(cc);
1926 set_cc_static(s);
1927 return NO_EXIT;
1930 #ifndef CONFIG_USER_ONLY
1931 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1933 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1934 check_privileged(s);
1935 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1936 tcg_temp_free_i32(r1);
1937 set_cc_static(s);
1938 return NO_EXIT;
1940 #endif
1942 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1944 TCGv_i64 t1 = tcg_temp_new_i64();
1945 TCGv_i32 t2 = tcg_temp_new_i32();
1946 tcg_gen_trunc_i64_i32(t2, o->in1);
1947 gen_helper_cvd(t1, t2);
1948 tcg_temp_free_i32(t2);
1949 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1950 tcg_temp_free_i64(t1);
1951 return NO_EXIT;
1954 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
1956 int m3 = get_field(s->fields, m3);
1957 int lab = gen_new_label();
1958 TCGv_i32 t;
1959 TCGCond c;
1961 c = tcg_invert_cond(ltgt_cond[m3]);
1962 if (s->insn->data) {
1963 c = tcg_unsigned_cond(c);
1965 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
1967 /* Set DXC to 0xff. */
1968 t = tcg_temp_new_i32();
1969 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1970 tcg_gen_ori_i32(t, t, 0xff00);
1971 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1972 tcg_temp_free_i32(t);
1974 /* Trap. */
1975 gen_program_exception(s, PGM_DATA);
1977 gen_set_label(lab);
1978 return NO_EXIT;
1981 #ifndef CONFIG_USER_ONLY
1982 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1984 TCGv_i32 tmp;
1986 check_privileged(s);
1987 potential_page_fault(s);
1989 /* We pretend the format is RX_a so that D2 is the field we want. */
1990 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1991 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1992 tcg_temp_free_i32(tmp);
1993 return NO_EXIT;
1995 #endif
1997 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1999 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2000 return_low128(o->out);
2001 return NO_EXIT;
2004 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2006 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2007 return_low128(o->out);
2008 return NO_EXIT;
2011 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2013 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2014 return_low128(o->out);
2015 return NO_EXIT;
2018 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2020 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2021 return_low128(o->out);
2022 return NO_EXIT;
2025 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2027 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2028 return NO_EXIT;
2031 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2033 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2034 return NO_EXIT;
2037 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2039 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2040 return_low128(o->out2);
2041 return NO_EXIT;
2044 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2046 int r2 = get_field(s->fields, r2);
2047 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2048 return NO_EXIT;
2051 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2053 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2054 return NO_EXIT;
2057 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2059 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2060 tb->flags, (ab)use the tb->cs_base field as the address of
2061 the template in memory, and grab 8 bits of tb->flags/cflags for
2062 the contents of the register. We would then recognize all this
2063 in gen_intermediate_code_internal, generating code for exactly
2064 one instruction. This new TB then gets executed normally.
2066 On the other hand, this seems to be mostly used for modifying
2067 MVC inside of memcpy, which needs a helper call anyway. So
2068 perhaps this doesn't bear thinking about any further. */
2070 TCGv_i64 tmp;
2072 update_psw_addr(s);
2073 update_cc_op(s);
2075 tmp = tcg_const_i64(s->next_pc);
2076 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2077 tcg_temp_free_i64(tmp);
2079 set_cc_static(s);
2080 return NO_EXIT;
2083 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2085 /* We'll use the original input for cc computation, since we get to
2086 compare that against 0, which ought to be better than comparing
2087 the real output against 64. It also lets cc_dst be a convenient
2088 temporary during our computation. */
2089 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2091 /* R1 = IN ? CLZ(IN) : 64. */
2092 gen_helper_clz(o->out, o->in2);
2094 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2095 value by 64, which is undefined. But since the shift is 64 iff the
2096 input is zero, we still get the correct result after and'ing. */
2097 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2098 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2099 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2100 return NO_EXIT;
2103 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2105 int m3 = get_field(s->fields, m3);
2106 int pos, len, base = s->insn->data;
2107 TCGv_i64 tmp = tcg_temp_new_i64();
2108 uint64_t ccm;
2110 switch (m3) {
2111 case 0xf:
2112 /* Effectively a 32-bit load. */
2113 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2114 len = 32;
2115 goto one_insert;
2117 case 0xc:
2118 case 0x6:
2119 case 0x3:
2120 /* Effectively a 16-bit load. */
2121 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2122 len = 16;
2123 goto one_insert;
2125 case 0x8:
2126 case 0x4:
2127 case 0x2:
2128 case 0x1:
2129 /* Effectively an 8-bit load. */
2130 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2131 len = 8;
2132 goto one_insert;
2134 one_insert:
2135 pos = base + ctz32(m3) * 8;
2136 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2137 ccm = ((1ull << len) - 1) << pos;
2138 break;
2140 default:
2141 /* This is going to be a sequence of loads and inserts. */
2142 pos = base + 32 - 8;
2143 ccm = 0;
2144 while (m3) {
2145 if (m3 & 0x8) {
2146 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2147 tcg_gen_addi_i64(o->in2, o->in2, 1);
2148 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2149 ccm |= 0xff << pos;
2151 m3 = (m3 << 1) & 0xf;
2152 pos -= 8;
2154 break;
2157 tcg_gen_movi_i64(tmp, ccm);
2158 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2159 tcg_temp_free_i64(tmp);
2160 return NO_EXIT;
2163 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2165 int shift = s->insn->data & 0xff;
2166 int size = s->insn->data >> 8;
2167 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2168 return NO_EXIT;
2171 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2173 TCGv_i64 t1;
2175 gen_op_calc_cc(s);
2176 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2178 t1 = tcg_temp_new_i64();
2179 tcg_gen_shli_i64(t1, psw_mask, 20);
2180 tcg_gen_shri_i64(t1, t1, 36);
2181 tcg_gen_or_i64(o->out, o->out, t1);
2183 tcg_gen_extu_i32_i64(t1, cc_op);
2184 tcg_gen_shli_i64(t1, t1, 28);
2185 tcg_gen_or_i64(o->out, o->out, t1);
2186 tcg_temp_free_i64(t1);
2187 return NO_EXIT;
2190 #ifndef CONFIG_USER_ONLY
2191 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2193 check_privileged(s);
2194 gen_helper_ipte(cpu_env, o->in1, o->in2);
2195 return NO_EXIT;
2198 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2200 check_privileged(s);
2201 gen_helper_iske(o->out, cpu_env, o->in2);
2202 return NO_EXIT;
2204 #endif
2206 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2208 gen_helper_ldeb(o->out, cpu_env, o->in2);
2209 return NO_EXIT;
2212 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2214 gen_helper_ledb(o->out, cpu_env, o->in2);
2215 return NO_EXIT;
2218 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2220 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2221 return NO_EXIT;
2224 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2226 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2227 return NO_EXIT;
2230 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2232 gen_helper_lxdb(o->out, cpu_env, o->in2);
2233 return_low128(o->out2);
2234 return NO_EXIT;
2237 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2239 gen_helper_lxeb(o->out, cpu_env, o->in2);
2240 return_low128(o->out2);
2241 return NO_EXIT;
2244 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2246 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2247 return NO_EXIT;
2250 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2252 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2253 return NO_EXIT;
2256 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2258 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2259 return NO_EXIT;
2262 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2264 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2265 return NO_EXIT;
2268 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2270 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2271 return NO_EXIT;
2274 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2276 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2277 return NO_EXIT;
2280 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2282 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2283 return NO_EXIT;
2286 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2288 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2289 return NO_EXIT;
2292 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2294 DisasCompare c;
2296 disas_jcc(s, &c, get_field(s->fields, m3));
2298 if (c.is_64) {
2299 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2300 o->in2, o->in1);
2301 free_compare(&c);
2302 } else {
2303 TCGv_i32 t32 = tcg_temp_new_i32();
2304 TCGv_i64 t, z;
2306 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2307 free_compare(&c);
2309 t = tcg_temp_new_i64();
2310 tcg_gen_extu_i32_i64(t, t32);
2311 tcg_temp_free_i32(t32);
2313 z = tcg_const_i64(0);
2314 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2315 tcg_temp_free_i64(t);
2316 tcg_temp_free_i64(z);
2319 return NO_EXIT;
2322 #ifndef CONFIG_USER_ONLY
2323 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2325 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2326 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2327 check_privileged(s);
2328 potential_page_fault(s);
2329 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2330 tcg_temp_free_i32(r1);
2331 tcg_temp_free_i32(r3);
2332 return NO_EXIT;
2335 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2337 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2338 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2339 check_privileged(s);
2340 potential_page_fault(s);
2341 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2342 tcg_temp_free_i32(r1);
2343 tcg_temp_free_i32(r3);
2344 return NO_EXIT;
2346 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2348 check_privileged(s);
2349 potential_page_fault(s);
2350 gen_helper_lra(o->out, cpu_env, o->in2);
2351 set_cc_static(s);
2352 return NO_EXIT;
2355 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2357 TCGv_i64 t1, t2;
2359 check_privileged(s);
2361 t1 = tcg_temp_new_i64();
2362 t2 = tcg_temp_new_i64();
2363 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2364 tcg_gen_addi_i64(o->in2, o->in2, 4);
2365 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2366 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2367 tcg_gen_shli_i64(t1, t1, 32);
2368 gen_helper_load_psw(cpu_env, t1, t2);
2369 tcg_temp_free_i64(t1);
2370 tcg_temp_free_i64(t2);
2371 return EXIT_NORETURN;
2374 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2376 TCGv_i64 t1, t2;
2378 check_privileged(s);
2380 t1 = tcg_temp_new_i64();
2381 t2 = tcg_temp_new_i64();
2382 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2383 tcg_gen_addi_i64(o->in2, o->in2, 8);
2384 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2385 gen_helper_load_psw(cpu_env, t1, t2);
2386 tcg_temp_free_i64(t1);
2387 tcg_temp_free_i64(t2);
2388 return EXIT_NORETURN;
2390 #endif
2392 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2394 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2395 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2396 potential_page_fault(s);
2397 gen_helper_lam(cpu_env, r1, o->in2, r3);
2398 tcg_temp_free_i32(r1);
2399 tcg_temp_free_i32(r3);
2400 return NO_EXIT;
2403 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2405 int r1 = get_field(s->fields, r1);
2406 int r3 = get_field(s->fields, r3);
2407 TCGv_i64 t = tcg_temp_new_i64();
2408 TCGv_i64 t4 = tcg_const_i64(4);
2410 while (1) {
2411 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2412 store_reg32_i64(r1, t);
2413 if (r1 == r3) {
2414 break;
2416 tcg_gen_add_i64(o->in2, o->in2, t4);
2417 r1 = (r1 + 1) & 15;
2420 tcg_temp_free_i64(t);
2421 tcg_temp_free_i64(t4);
2422 return NO_EXIT;
2425 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2427 int r1 = get_field(s->fields, r1);
2428 int r3 = get_field(s->fields, r3);
2429 TCGv_i64 t = tcg_temp_new_i64();
2430 TCGv_i64 t4 = tcg_const_i64(4);
2432 while (1) {
2433 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2434 store_reg32h_i64(r1, t);
2435 if (r1 == r3) {
2436 break;
2438 tcg_gen_add_i64(o->in2, o->in2, t4);
2439 r1 = (r1 + 1) & 15;
2442 tcg_temp_free_i64(t);
2443 tcg_temp_free_i64(t4);
2444 return NO_EXIT;
2447 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2449 int r1 = get_field(s->fields, r1);
2450 int r3 = get_field(s->fields, r3);
2451 TCGv_i64 t8 = tcg_const_i64(8);
2453 while (1) {
2454 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2455 if (r1 == r3) {
2456 break;
2458 tcg_gen_add_i64(o->in2, o->in2, t8);
2459 r1 = (r1 + 1) & 15;
2462 tcg_temp_free_i64(t8);
2463 return NO_EXIT;
/* Generic move: transfer the in2 temporary directly to the output,
   stealing ownership instead of emitting a copy.  */
2466 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2468 o->out = o->in2;
2469 o->g_out = o->g_in2;
/* The temporary now belongs to out; forget in2 so it is not freed twice.  */
2470 TCGV_UNUSED_I64(o->in2);
2471 o->g_in2 = false;
2472 return NO_EXIT;
/* 128-bit move: transfer both input temporaries (high/low halves)
   to the output pair, again stealing ownership.  */
2475 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2477 o->out = o->in1;
2478 o->out2 = o->in2;
2479 o->g_out = o->g_in1;
2480 o->g_out2 = o->g_in2;
2481 TCGV_UNUSED_I64(o->in1);
2482 TCGV_UNUSED_I64(o->in2);
2483 o->g_in1 = o->g_in2 = false;
2484 return NO_EXIT;
2487 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2489 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2490 potential_page_fault(s);
2491 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2492 tcg_temp_free_i32(l);
2493 return NO_EXIT;
2496 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2498 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2499 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2500 potential_page_fault(s);
2501 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2502 tcg_temp_free_i32(r1);
2503 tcg_temp_free_i32(r2);
2504 set_cc_static(s);
2505 return NO_EXIT;
2508 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2510 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2511 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2512 potential_page_fault(s);
2513 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2514 tcg_temp_free_i32(r1);
2515 tcg_temp_free_i32(r3);
2516 set_cc_static(s);
2517 return NO_EXIT;
2520 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-space move via helper.
   Note: for MVCP/MVCS the key register number lives in the l1 field
   of the decoded format, hence get_field(..., l1) here.  */
2521 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2523 int r1 = get_field(s->fields, l1);
2524 check_privileged(s);
2525 potential_page_fault(s);
2526 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2527 set_cc_static(s);
2528 return NO_EXIT;
/* MOVE TO SECONDARY: mirror of MVCP in the other direction.  */
2531 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2533 int r1 = get_field(s->fields, l1);
2534 check_privileged(s);
2535 potential_page_fault(s);
2536 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2537 set_cc_static(s);
2538 return NO_EXIT;
2540 #endif
/* MOVE PAGE: helper copies one page; r0 supplies the operand controls.  */
2542 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2544 potential_page_fault(s);
2545 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2546 set_cc_static(s);
2547 return NO_EXIT;
/* MOVE STRING: helper copies up to the terminator in r0;
   updated addresses come back in the return value and low128.  */
2550 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2552 potential_page_fault(s);
2553 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2554 set_cc_static(s);
2555 return_low128(o->in2);
2556 return NO_EXIT;
/* MULTIPLY (64-bit, low result only).  */
2559 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2561 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2562 return NO_EXIT;
/* MULTIPLY LOGICAL producing a 128-bit result in the out/out2 pair.  */
2565 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2567 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2568 return NO_EXIT;
/* BFP multiply, 32-bit operands.  */
2571 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2573 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2574 return NO_EXIT;
/* BFP multiply, 32-bit operands widened to a 64-bit result.  */
2577 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2579 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2580 return NO_EXIT;
/* BFP multiply, 64-bit operands.  */
2583 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2585 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2586 return NO_EXIT;
/* BFP multiply, 128-bit operands; low half returned via low128.  */
2589 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2591 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2592 return_low128(o->out2);
2593 return NO_EXIT;
/* BFP multiply, 64-bit operands widened to a 128-bit result.  */
2596 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2598 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2599 return_low128(o->out2);
2600 return NO_EXIT;
/* BFP multiply-and-add, 32-bit: out = in1 * in2 + f[r3].  */
2603 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2605 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2606 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2607 tcg_temp_free_i64(r3);
2608 return NO_EXIT;
/* BFP multiply-and-add, 64-bit: fregs[r3] is a global, so no temp.  */
2611 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2613 int r3 = get_field(s->fields, r3);
2614 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2615 return NO_EXIT;
/* BFP multiply-and-subtract, 32-bit.  */
2618 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2620 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2621 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2622 tcg_temp_free_i64(r3);
2623 return NO_EXIT;
/* BFP multiply-and-subtract, 64-bit.  */
2626 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2628 int r3 = get_field(s->fields, r3);
2629 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2630 return NO_EXIT;
/* LOAD NEGATIVE (integer): out = -|in2|.  */
2633 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2635 gen_helper_nabs_i64(o->out, o->in2);
2636 return NO_EXIT;
/* LOAD NEGATIVE (32-bit BFP): force the sign bit on.  */
2639 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2641 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2642 return NO_EXIT;
/* LOAD NEGATIVE (64-bit BFP).  */
2645 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2647 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2648 return NO_EXIT;
/* LOAD NEGATIVE (128-bit BFP): sign lives in the high half.  */
2651 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2653 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2654 tcg_gen_mov_i64(o->out2, o->in2);
2655 return NO_EXIT;
/* AND (character): storage-to-storage AND via helper; sets CC.  */
2658 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2660 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2661 potential_page_fault(s);
2662 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2663 tcg_temp_free_i32(l);
2664 set_cc_static(s);
2665 return NO_EXIT;
/* LOAD COMPLEMENT (integer).  */
2668 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2670 tcg_gen_neg_i64(o->out, o->in2);
2671 return NO_EXIT;
/* LOAD COMPLEMENT (32-bit BFP): flip the sign bit.  */
2674 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2676 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2677 return NO_EXIT;
/* LOAD COMPLEMENT (64-bit BFP).  */
2680 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2682 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2683 return NO_EXIT;
/* LOAD COMPLEMENT (128-bit BFP): sign lives in the high half.  */
2686 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2688 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2689 tcg_gen_mov_i64(o->out2, o->in2);
2690 return NO_EXIT;
/* OR (character): storage-to-storage OR via helper; sets CC.  */
2693 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2695 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2696 potential_page_fault(s);
2697 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2698 tcg_temp_free_i32(l);
2699 set_cc_static(s);
2700 return NO_EXIT;
/* OR (register/register).  */
2703 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2705 tcg_gen_or_i64(o->out, o->in1, o->in2);
2706 return NO_EXIT;
2709 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2711 int shift = s->insn->data & 0xff;
2712 int size = s->insn->data >> 8;
2713 uint64_t mask = ((1ull << size) - 1) << shift;
2715 assert(!o->g_in2);
2716 tcg_gen_shli_i64(o->in2, o->in2, shift);
2717 tcg_gen_or_i64(o->out, o->in1, o->in2);
2719 /* Produce the CC from only the bits manipulated. */
2720 tcg_gen_andi_i64(cc_dst, o->out, mask);
2721 set_cc_nz_u64(s, cc_dst);
2722 return NO_EXIT;
/* POPULATION COUNT: helper counts bits per byte of in2.  */
2725 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2727 gen_helper_popcnt(o->out, o->in2);
2728 return NO_EXIT;
2731 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flush the translation lookaside buffer.  */
2732 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2734 check_privileged(s);
2735 gen_helper_ptlb(cpu_env);
2736 return NO_EXIT;
2738 #endif
/* ROTATE THEN INSERT SELECTED BITS: rotate R2 left by I5, then insert
   the bits selected by I3..I4 into R1, optionally zeroing the rest.
   The mask computation is order-sensitive; handled for RISBG (full
   64-bit), RISBHG (high word only) and RISBLG (low word only).  */
2740 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2742 int i3 = get_field(s->fields, i3);
2743 int i4 = get_field(s->fields, i4);
2744 int i5 = get_field(s->fields, i5);
/* Bit 0x80 of I4 is the "zero remaining bits" flag.  */
2745 int do_zero = i4 & 0x80;
2746 uint64_t mask, imask, pmask;
2747 int pos, len, rot;
2749 /* Adjust the arguments for the specific insn. */
2750 switch (s->fields->op2) {
2751 case 0x55: /* risbg */
2752 i3 &= 63;
2753 i4 &= 63;
/* pmask selects which half (or all) of the register is in play.  */
2754 pmask = ~0;
2755 break;
2756 case 0x5d: /* risbhg */
2757 i3 &= 31;
2758 i4 &= 31;
2759 pmask = 0xffffffff00000000ull;
2760 break;
2761 case 0x51: /* risblg */
2762 i3 &= 31;
2763 i4 &= 31;
2764 pmask = 0x00000000ffffffffull;
2765 break;
2766 default:
2767 abort();
2770 /* MASK is the set of bits to be inserted from R2.
2771 Take care for I3/I4 wraparound. */
2772 mask = pmask >> i3;
2773 if (i3 <= i4) {
2774 mask ^= pmask >> i4 >> 1;
2775 } else {
/* Wrapped range: select everything outside (i4, i3).  */
2776 mask |= ~(pmask >> i4 >> 1);
2778 mask &= pmask;
2780 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2781 insns, we need to keep the other half of the register. */
2782 imask = ~mask | ~pmask;
2783 if (do_zero) {
2784 if (s->fields->op2 == 0x55) {
2785 imask = 0;
2786 } else {
2787 imask = ~pmask;
2791 /* In some cases we can implement this with deposit, which can be more
2792 efficient on some hosts. */
2793 if (~mask == imask && i3 <= i4) {
2794 if (s->fields->op2 == 0x5d) {
/* risbhg: bit numbers are relative to the high word.  */
2795 i3 += 32, i4 += 32;
2797 /* Note that we rotate the bits to be inserted to the lsb, not to
2798 the position as described in the PoO. */
2799 len = i4 - i3 + 1;
2800 pos = 63 - i4;
2801 rot = (i5 - pos) & 63;
2802 } else {
/* General case: no deposit; pos < 0 signals the fallback below.  */
2803 pos = len = -1;
2804 rot = i5 & 63;
2807 /* Rotate the input as necessary. */
2808 tcg_gen_rotli_i64(o->in2, o->in2, rot);
2810 /* Insert the selected bits into the output. */
2811 if (pos >= 0) {
2812 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2813 } else if (imask == 0) {
2814 tcg_gen_andi_i64(o->out, o->in2, mask);
2815 } else {
2816 tcg_gen_andi_i64(o->in2, o->in2, mask);
2817 tcg_gen_andi_i64(o->out, o->out, imask);
2818 tcg_gen_or_i64(o->out, o->out, o->in2);
2820 return NO_EXIT;
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS: rotate R2 by I5, combine the
   bits selected by I3..I4 into R1, and set the CC from those bits.
   op2 distinguishes RNSBG (0x55), ROSBG (0x56) and RXSBG (0x57).  */
2823 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2825 int i3 = get_field(s->fields, i3);
2826 int i4 = get_field(s->fields, i4);
2827 int i5 = get_field(s->fields, i5);
2828 uint64_t mask;
2830 /* If this is a test-only form, arrange to discard the result. */
2831 if (i3 & 0x80) {
2832 o->out = tcg_temp_new_i64();
2833 o->g_out = false;
2836 i3 &= 63;
2837 i4 &= 63;
2838 i5 &= 63;
2840 /* MASK is the set of bits to be operated on from R2.
2841 Take care for I3/I4 wraparound. */
2842 mask = ~0ull >> i3;
2843 if (i3 <= i4) {
2844 mask ^= ~0ull >> i4 >> 1;
2845 } else {
/* Wrapped range: select everything outside (i4, i3).  */
2846 mask |= ~(~0ull >> i4 >> 1);
2849 /* Rotate the input as necessary. */
2850 tcg_gen_rotli_i64(o->in2, o->in2, i5);
2852 /* Operate. */
2853 switch (s->fields->op2) {
2854 case 0x55: /* AND */
/* Unselected bits must not clear anything: force them to 1.  */
2855 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2856 tcg_gen_and_i64(o->out, o->out, o->in2);
2857 break;
2858 case 0x56: /* OR */
2859 tcg_gen_andi_i64(o->in2, o->in2, mask);
2860 tcg_gen_or_i64(o->out, o->out, o->in2);
2861 break;
2862 case 0x57: /* XOR */
2863 tcg_gen_andi_i64(o->in2, o->in2, mask);
2864 tcg_gen_xor_i64(o->out, o->out, o->in2);
2865 break;
2866 default:
2867 abort();
2870 /* Set the CC. */
2871 tcg_gen_andi_i64(cc_dst, o->out, mask);
2872 set_cc_nz_u64(s, cc_dst);
2873 return NO_EXIT;
/* Byte-swap a halfword.  */
2876 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2878 tcg_gen_bswap16_i64(o->out, o->in2);
2879 return NO_EXIT;
/* Byte-swap a word.  */
2882 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2884 tcg_gen_bswap32_i64(o->out, o->in2);
2885 return NO_EXIT;
/* Byte-swap a doubleword.  */
2888 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2890 tcg_gen_bswap64_i64(o->out, o->in2);
2891 return NO_EXIT;
/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate in 32-bit width, then
   zero-extend the result into the 64-bit output.  */
2894 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2896 TCGv_i32 t1 = tcg_temp_new_i32();
2897 TCGv_i32 t2 = tcg_temp_new_i32();
2898 TCGv_i32 to = tcg_temp_new_i32();
2899 tcg_gen_trunc_i64_i32(t1, o->in1);
2900 tcg_gen_trunc_i64_i32(t2, o->in2);
2901 tcg_gen_rotl_i32(to, t1, t2);
2902 tcg_gen_extu_i32_i64(o->out, to);
2903 tcg_temp_free_i32(t1);
2904 tcg_temp_free_i32(t2);
2905 tcg_temp_free_i32(to);
2906 return NO_EXIT;
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
2909 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2911 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2912 return NO_EXIT;
2915 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets the CC.  */
2916 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2918 check_privileged(s);
2919 gen_helper_rrbe(cc_op, cpu_env, o->in2);
2920 set_cc_static(s);
2921 return NO_EXIT;
/* SET ADDRESS SPACE CONTROL FAST: privileged.  */
2924 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2926 check_privileged(s);
2927 gen_helper_sacf(cpu_env, o->in2);
2928 /* Addressing mode has changed, so end the block. */
2929 return EXIT_PC_STALE;
2931 #endif
/* SET ACCESS: store in2 into access register r1.  */
2933 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2935 int r1 = get_field(s->fields, r1);
2936 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
2937 return NO_EXIT;
/* BFP subtract, 32-bit.  */
2940 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2942 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2943 return NO_EXIT;
/* BFP subtract, 64-bit.  */
2946 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2948 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2949 return NO_EXIT;
/* BFP subtract, 128-bit; low half returned via low128.  */
2952 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2954 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2955 return_low128(o->out2);
2956 return NO_EXIT;
/* BFP square root, 32-bit.  */
2959 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2961 gen_helper_sqeb(o->out, cpu_env, o->in2);
2962 return NO_EXIT;
/* BFP square root, 64-bit.  */
2965 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2967 gen_helper_sqdb(o->out, cpu_env, o->in2);
2968 return NO_EXIT;
/* BFP square root, 128-bit; low half returned via low128.  */
2971 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2973 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2974 return_low128(o->out2);
2975 return NO_EXIT;
2978 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL: privileged; helper talks to the SCLP and sets CC.  */
2979 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2981 check_privileged(s);
2982 potential_page_fault(s);
2983 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
2984 set_cc_static(s);
2985 return NO_EXIT;
/* SIGNAL PROCESSOR: privileged inter-CPU signalling via helper.  */
2988 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2990 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2991 check_privileged(s);
2992 potential_page_fault(s);
2993 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2994 tcg_temp_free_i32(r1);
2995 return NO_EXIT;
2997 #endif
2999 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3001 DisasCompare c;
3002 TCGv_i64 a;
3003 int lab, r1;
3005 disas_jcc(s, &c, get_field(s->fields, m3));
3007 lab = gen_new_label();
3008 if (c.is_64) {
3009 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3010 } else {
3011 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3013 free_compare(&c);
3015 r1 = get_field(s->fields, r1);
3016 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3017 if (s->insn->data) {
3018 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3019 } else {
3020 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3022 tcg_temp_free_i64(a);
3024 gen_set_label(lab);
3025 return NO_EXIT;
3028 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3030 uint64_t sign = 1ull << s->insn->data;
3031 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3032 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3033 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3034 /* The arithmetic left shift is curious in that it does not affect
3035 the sign bit. Copy that over from the source unchanged. */
3036 tcg_gen_andi_i64(o->out, o->out, ~sign);
3037 tcg_gen_andi_i64(o->in1, o->in1, sign);
3038 tcg_gen_or_i64(o->out, o->out, o->in1);
3039 return NO_EXIT;
/* SHIFT LEFT SINGLE LOGICAL.  */
3042 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3044 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3045 return NO_EXIT;
/* SHIFT RIGHT SINGLE (arithmetic).  */
3048 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3050 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3051 return NO_EXIT;
/* SHIFT RIGHT SINGLE LOGICAL.  */
3054 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3056 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3057 return NO_EXIT;
/* SET FPC: install a new floating-point control register.  */
3060 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3062 gen_helper_sfpc(cpu_env, o->in2);
3063 return NO_EXIT;
/* SET FPC AND SIGNAL.  */
3066 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3068 gen_helper_sfas(cpu_env, o->in2);
3069 return NO_EXIT;
/* SET ROUNDING MODE: insert the new mode into the proper field of the
   FPC, then re-install the FPC so fpu_status picks up the change.
   SRNM/SRNMB set the BFP rounding mode; SRNMT the DFP one.  */
3072 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3074 int b2 = get_field(s->fields, b2);
3075 int d2 = get_field(s->fields, d2);
3076 TCGv_i64 t1 = tcg_temp_new_i64();
3077 TCGv_i64 t2 = tcg_temp_new_i64();
3078 int mask, pos, len;
3080 switch (s->fields->op2) {
3081 case 0x99: /* SRNM */
3082 pos = 0, len = 2;
3083 break;
3084 case 0xb8: /* SRNMB */
3085 pos = 0, len = 3;
3086 break;
3087 case 0xb9: /* SRNMT */
3088 pos = 4, len = 3;
3089 break;
3090 default:
3091 tcg_abort();
3093 mask = (1 << len) - 1;
3095 /* Insert the value into the appropriate field of the FPC. */
3096 if (b2 == 0) {
/* No base register: the mode is the displacement itself.  */
3097 tcg_gen_movi_i64(t1, d2 & mask);
3098 } else {
3099 tcg_gen_addi_i64(t1, regs[b2], d2);
3100 tcg_gen_andi_i64(t1, t1, mask);
3102 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3103 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3104 tcg_temp_free_i64(t1);
3106 /* Then install the new FPC to set the rounding mode in fpu_status. */
3107 gen_helper_sfpc(cpu_env, t2);
3108 tcg_temp_free_i64(t2);
3109 return NO_EXIT;
3112 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: deposit bits 56-59 of in2 into the PSW key.  */
3113 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3115 check_privileged(s);
3116 tcg_gen_shri_i64(o->in2, o->in2, 4);
3117 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3118 return NO_EXIT;
/* SET STORAGE KEY EXTENDED.  */
3121 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3123 check_privileged(s);
3124 gen_helper_sske(cpu_env, o->in1, o->in2);
3125 return NO_EXIT;
/* SET SYSTEM MASK: replace the top byte of the PSW mask.  */
3128 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3130 check_privileged(s);
3131 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3132 return NO_EXIT;
/* STORE CPU ADDRESS.  */
3135 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3137 check_privileged(s);
3138 /* ??? Surely cpu address != cpu number. In any case the previous
3139 version of this stored more than the required half-word, so it
3140 is unlikely this has ever been tested. */
3141 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3142 return NO_EXIT;
/* STORE CLOCK.  */
3145 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3147 gen_helper_stck(o->out, cpu_env);
3148 /* ??? We don't implement clock states. */
3149 gen_op_movi_cc(s, 0);
3150 return NO_EXIT;
/* STORE CLOCK EXTENDED: 16-byte zero-extended 104-bit clock value.  */
3153 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3155 TCGv_i64 c1 = tcg_temp_new_i64();
3156 TCGv_i64 c2 = tcg_temp_new_i64();
3157 gen_helper_stck(c1, cpu_env);
3158 /* Shift the 64-bit value into its place as a zero-extended
3159 104-bit value. Note that "bit positions 64-103 are always
3160 non-zero so that they compare differently to STCK"; we set
3161 the least significant bit to 1. */
3162 tcg_gen_shli_i64(c2, c1, 56);
3163 tcg_gen_shri_i64(c1, c1, 8);
3164 tcg_gen_ori_i64(c2, c2, 0x10000);
3165 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3166 tcg_gen_addi_i64(o->in2, o->in2, 8);
3167 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3168 tcg_temp_free_i64(c1);
3169 tcg_temp_free_i64(c2);
3170 /* ??? We don't implement clock states. */
3171 gen_op_movi_cc(s, 0);
3172 return NO_EXIT;
/* SET CLOCK COMPARATOR.  */
3175 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3177 check_privileged(s);
3178 gen_helper_sckc(cpu_env, o->in2);
3179 return NO_EXIT;
/* STORE CLOCK COMPARATOR.  */
3182 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3184 check_privileged(s);
3185 gen_helper_stckc(o->out, cpu_env);
3186 return NO_EXIT;
/* STORE CONTROL (64-bit): control regs r1..r3 to memory.  */
3189 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3191 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3192 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3193 check_privileged(s);
3194 potential_page_fault(s);
3195 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3196 tcg_temp_free_i32(r1);
3197 tcg_temp_free_i32(r3);
3198 return NO_EXIT;
/* STORE CONTROL (32-bit).  */
3201 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3203 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3204 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3205 check_privileged(s);
3206 potential_page_fault(s);
3207 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3208 tcg_temp_free_i32(r1);
3209 tcg_temp_free_i32(r3);
3210 return NO_EXIT;
/* STORE CPU ID.  */
3213 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3215 check_privileged(s);
3216 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3217 return NO_EXIT;
/* SET CPU TIMER.  */
3220 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3222 check_privileged(s);
3223 gen_helper_spt(cpu_env, o->in2);
3224 return NO_EXIT;
/* STORE FACILITY LIST: fixed word at real address 200.  */
3227 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3229 TCGv_i64 f, a;
3230 /* We really ought to have more complete indication of facilities
3231 that we implement. Address this when STFLE is implemented. */
3232 check_privileged(s);
3233 f = tcg_const_i64(0xc0000000);
3234 a = tcg_const_i64(200);
3235 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3236 tcg_temp_free_i64(f);
3237 tcg_temp_free_i64(a);
3238 return NO_EXIT;
/* STORE CPU TIMER.  */
3241 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3243 check_privileged(s);
3244 gen_helper_stpt(o->out, cpu_env);
3245 return NO_EXIT;
/* STORE SYSTEM INFORMATION: helper consumes function code in r0/r1.  */
3248 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3250 check_privileged(s);
3251 potential_page_fault(s);
3252 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3253 set_cc_static(s);
3254 return NO_EXIT;
/* SET PREFIX.  */
3257 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3259 check_privileged(s);
3260 gen_helper_spx(cpu_env, o->in2);
3261 return NO_EXIT;
/* Channel-subsystem stubs: always report "not operational" (CC 3).  */
3264 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3266 check_privileged(s);
3267 /* Not operational. */
3268 gen_op_movi_cc(s, 3);
3269 return NO_EXIT;
/* STORE PREFIX.  */
3272 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3274 check_privileged(s);
3275 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3276 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3277 return NO_EXIT;
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac, STOSM op 0xad).  */
3280 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3282 uint64_t i2 = get_field(s->fields, i2);
3283 TCGv_i64 t;
3285 check_privileged(s);
3287 /* It is important to do what the instruction name says: STORE THEN.
3288 If we let the output hook perform the store then if we fault and
3289 restart, we'll have the wrong SYSTEM MASK in place. */
3290 t = tcg_temp_new_i64();
3291 tcg_gen_shri_i64(t, psw_mask, 56);
3292 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3293 tcg_temp_free_i64(t);
3295 if (s->fields->op == 0xac) {
3296 tcg_gen_andi_i64(psw_mask, psw_mask,
3297 (i2 << 56) | 0x00ffffffffffffffull);
3298 } else {
3299 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3301 return NO_EXIT;
/* STORE USING REAL ADDRESS.  */
3304 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3306 check_privileged(s);
3307 potential_page_fault(s);
3308 gen_helper_stura(cpu_env, o->in2, o->in1);
3309 return NO_EXIT;
3311 #endif
/* Store a byte of in1 at the in2 address.  */
3313 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3315 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3316 return NO_EXIT;
/* Store a halfword.  */
3319 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3321 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3322 return NO_EXIT;
/* Store a word.  */
3325 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3327 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3328 return NO_EXIT;
/* Store a doubleword.  */
3331 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3333 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3334 return NO_EXIT;
/* STORE ACCESS MULTIPLE: helper writes access regs r1..r3 to memory.  */
3337 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3339 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3340 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3341 potential_page_fault(s);
3342 gen_helper_stam(cpu_env, r1, o->in2, r3);
3343 tcg_temp_free_i32(r1);
3344 tcg_temp_free_i32(r3);
3345 return NO_EXIT;
/* STORE CHARACTERS UNDER MASK: store the bytes of r1 selected by the
   m3 mask.  Contiguous masks become a single sized store; sparse masks
   fall back to a byte-at-a-time sequence.  insn->data is the bit
   offset of the source field within the register.  */
3348 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3350 int m3 = get_field(s->fields, m3);
3351 int pos, base = s->insn->data;
3352 TCGv_i64 tmp = tcg_temp_new_i64();
/* Position of the least significant selected byte.  */
3354 pos = base + ctz32(m3) * 8;
3355 switch (m3) {
3356 case 0xf:
3357 /* Effectively a 32-bit store. */
3358 tcg_gen_shri_i64(tmp, o->in1, pos);
3359 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3360 break;
3362 case 0xc:
3363 case 0x6:
3364 case 0x3:
3365 /* Effectively a 16-bit store. */
3366 tcg_gen_shri_i64(tmp, o->in1, pos);
3367 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3368 break;
3370 case 0x8:
3371 case 0x4:
3372 case 0x2:
3373 case 0x1:
3374 /* Effectively an 8-bit store. */
3375 tcg_gen_shri_i64(tmp, o->in1, pos);
3376 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3377 break;
3379 default:
3380 /* This is going to be a sequence of shifts and stores. */
3381 pos = base + 32 - 8;
3382 while (m3) {
3383 if (m3 & 0x8) {
3384 tcg_gen_shri_i64(tmp, o->in1, pos);
3385 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3386 tcg_gen_addi_i64(o->in2, o->in2, 1);
/* Walk the mask msb-to-lsb, one byte per set bit.  */
3388 m3 = (m3 << 1) & 0xf;
3389 pos -= 8;
3391 break;
3393 tcg_temp_free_i64(tmp);
3394 return NO_EXIT;
3397 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3399 int r1 = get_field(s->fields, r1);
3400 int r3 = get_field(s->fields, r3);
3401 int size = s->insn->data;
3402 TCGv_i64 tsize = tcg_const_i64(size);
3404 while (1) {
3405 if (size == 8) {
3406 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3407 } else {
3408 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3410 if (r1 == r3) {
3411 break;
3413 tcg_gen_add_i64(o->in2, o->in2, tsize);
3414 r1 = (r1 + 1) & 15;
3417 tcg_temp_free_i64(tsize);
3418 return NO_EXIT;
3421 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3423 int r1 = get_field(s->fields, r1);
3424 int r3 = get_field(s->fields, r3);
3425 TCGv_i64 t = tcg_temp_new_i64();
3426 TCGv_i64 t4 = tcg_const_i64(4);
3427 TCGv_i64 t32 = tcg_const_i64(32);
3429 while (1) {
3430 tcg_gen_shl_i64(t, regs[r1], t32);
3431 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3432 if (r1 == r3) {
3433 break;
3435 tcg_gen_add_i64(o->in2, o->in2, t4);
3436 r1 = (r1 + 1) & 15;
3439 tcg_temp_free_i64(t);
3440 tcg_temp_free_i64(t4);
3441 tcg_temp_free_i64(t32);
3442 return NO_EXIT;
/* SEARCH STRING: helper scans for the byte in r0; updated address
   comes back in the return value and low128.  */
3445 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3447 potential_page_fault(s);
3448 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3449 set_cc_static(s);
3450 return_low128(o->in2);
3451 return NO_EXIT;
/* SUBTRACT.  */
3454 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3456 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3457 return NO_EXIT;
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   recovered from the current condition code.  */
3460 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3462 DisasCompare cmp;
3463 TCGv_i64 borrow;
3465 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3467 /* The !borrow flag is the msb of CC. Since we want the inverse of
3468 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3469 disas_jcc(s, &cmp, 8 | 4);
3470 borrow = tcg_temp_new_i64();
3471 if (cmp.is_64) {
3472 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3473 } else {
3474 TCGv_i32 t = tcg_temp_new_i32();
3475 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3476 tcg_gen_extu_i32_i64(borrow, t);
3477 tcg_temp_free_i32(t);
3479 free_compare(&cmp);
3481 tcg_gen_sub_i64(o->out, o->out, borrow);
3482 tcg_temp_free_i64(borrow);
3483 return NO_EXIT;
3486 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3488 TCGv_i32 t;
3490 update_psw_addr(s);
3491 update_cc_op(s);
3493 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3494 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3495 tcg_temp_free_i32(t);
3497 t = tcg_const_i32(s->next_pc - s->pc);
3498 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3499 tcg_temp_free_i32(t);
3501 gen_exception(EXCP_SVC);
3502 return EXIT_NORETURN;
/* TEST DATA CLASS (32-bit BFP): helper returns the CC directly.  */
3505 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3507 gen_helper_tceb(cc_op, o->in1, o->in2);
3508 set_cc_static(s);
3509 return NO_EXIT;
/* TEST DATA CLASS (64-bit BFP).  */
3512 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3514 gen_helper_tcdb(cc_op, o->in1, o->in2);
3515 set_cc_static(s);
3516 return NO_EXIT;
/* TEST DATA CLASS (128-bit BFP): value in the out/out2 pair.  */
3519 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3521 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3522 set_cc_static(s);
3523 return NO_EXIT;
3526 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper probes the access and yields the CC.  */
3527 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3529 potential_page_fault(s);
3530 gen_helper_tprot(cc_op, o->addr1, o->in2);
3531 set_cc_static(s);
3532 return NO_EXIT;
3534 #endif
/* TRANSLATE: table-driven byte translation via helper.  */
3536 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3538 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3539 potential_page_fault(s);
3540 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3541 tcg_temp_free_i32(l);
3542 set_cc_static(s);
3543 return NO_EXIT;
/* UNPACK: convert packed decimal to zoned format via helper.  */
3546 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3548 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3549 potential_page_fault(s);
3550 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3551 tcg_temp_free_i32(l);
3552 return NO_EXIT;
/* EXCLUSIVE OR (character).  XC of an operand with itself is the
   idiomatic s390 memset-to-zero; that case is open-coded with stores
   of decreasing width, everything else goes through the helper.  */
3555 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3557 int d1 = get_field(s->fields, d1);
3558 int d2 = get_field(s->fields, d2);
3559 int b1 = get_field(s->fields, b1);
3560 int b2 = get_field(s->fields, b2);
/* l is the length code: actual byte count minus one.  */
3561 int l = get_field(s->fields, l1);
3562 TCGv_i32 t32;
3564 o->addr1 = get_address(s, 0, b1, d1);
3566 /* If the addresses are identical, this is a store/memset of zero. */
3567 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3568 o->in2 = tcg_const_i64(0);
3570 l++;
/* Emit the widest stores first, advancing the address between them.  */
3571 while (l >= 8) {
3572 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3573 l -= 8;
3574 if (l > 0) {
3575 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3578 if (l >= 4) {
3579 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3580 l -= 4;
3581 if (l > 0) {
3582 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3585 if (l >= 2) {
3586 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3587 l -= 2;
3588 if (l > 0) {
3589 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3592 if (l) {
3593 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
/* x XOR x == 0, so the result is zero and CC is 0.  */
3595 gen_op_movi_cc(s, 0);
3596 return NO_EXIT;
3599 /* But in general we'll defer to a helper. */
3600 o->in2 = get_address(s, 0, b2, d2);
3601 t32 = tcg_const_i32(l);
3602 potential_page_fault(s);
3603 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3604 tcg_temp_free_i32(t32);
3605 set_cc_static(s);
3606 return NO_EXIT;
/* EXCLUSIVE OR (register/register).  */
3609 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3611 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3612 return NO_EXIT;
/* EXCLUSIVE OR IMMEDIATE into a sub-field of the register;
   insn->data encodes (size << 8) | shift, as for op_ori.  */
3615 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3617 int shift = s->insn->data & 0xff;
3618 int size = s->insn->data >> 8;
3619 uint64_t mask = ((1ull << size) - 1) << shift;
3621 assert(!o->g_in2);
3622 tcg_gen_shli_i64(o->in2, o->in2, shift);
3623 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3625 /* Produce the CC from only the bits manipulated. */
3626 tcg_gen_andi_i64(cc_dst, o->out, mask);
3627 set_cc_nz_u64(s, cc_dst);
3628 return NO_EXIT;
/* Produce a zero output temporary.  */
3631 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3633 o->out = tcg_const_i64(0);
3634 return NO_EXIT;
/* Produce a zero output pair; out2 aliases out, so mark it global
   to prevent a double free.  */
3637 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3639 o->out = tcg_const_i64(0);
3640 o->out2 = o->out;
3641 o->g_out2 = true;
3642 return NO_EXIT;
3645 /* ====================================================================== */
3646 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3647 the original inputs), update the various cc data structures in order to
3648 be able to compute the new condition code. */
/* CC from a 32-bit absolute-value result.  */
3650 static void cout_abs32(DisasContext *s, DisasOps *o)
3652 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
/* CC from a 64-bit absolute-value result.  */
3655 static void cout_abs64(DisasContext *s, DisasOps *o)
3657 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
/* CC from signed 32-bit addition.  */
3660 static void cout_adds32(DisasContext *s, DisasOps *o)
3662 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
/* CC from signed 64-bit addition.  */
3665 static void cout_adds64(DisasContext *s, DisasOps *o)
3667 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
/* CC from unsigned 32-bit addition.  */
3670 static void cout_addu32(DisasContext *s, DisasOps *o)
3672 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
/* CC from unsigned 64-bit addition.  */
3675 static void cout_addu64(DisasContext *s, DisasOps *o)
3677 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
/* CC from 32-bit add-with-carry.  */
3680 static void cout_addc32(DisasContext *s, DisasOps *o)
3682 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
/* CC from 64-bit add-with-carry.  */
3685 static void cout_addc64(DisasContext *s, DisasOps *o)
3687 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
/* CC from signed 32-bit comparison.  */
3690 static void cout_cmps32(DisasContext *s, DisasOps *o)
3692 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
/* CC from signed 64-bit comparison.  */
3695 static void cout_cmps64(DisasContext *s, DisasOps *o)
3697 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
/* CC from unsigned 32-bit comparison.  */
3700 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3702 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
/* CC from unsigned 64-bit comparison.  */
3705 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3707 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
/* CC from a 32-bit BFP result.  */
3710 static void cout_f32(DisasContext *s, DisasOps *o)
3712 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
/* CC from a 64-bit BFP result.  */
3715 static void cout_f64(DisasContext *s, DisasOps *o)
3717 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
/* CC from a 128-bit BFP result (out/out2 pair).  */
3720 static void cout_f128(DisasContext *s, DisasOps *o)
3722 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
/* CC from a 32-bit negative-absolute result.  */
3725 static void cout_nabs32(DisasContext *s, DisasOps *o)
3727 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
/* CC from a 64-bit negative-absolute result.  */
3730 static void cout_nabs64(DisasContext *s, DisasOps *o)
3732 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
/* CC from a 32-bit complement result.  */
3735 static void cout_neg32(DisasContext *s, DisasOps *o)
3737 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
/* CC from a 64-bit complement result.  */
3740 static void cout_neg64(DisasContext *s, DisasOps *o)
3742 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* CC: nonzero test on the low 32 bits of the result.  */
3745 static void cout_nz32(DisasContext *s, DisasOps *o)
3747 tcg_gen_ext32u_i64(cc_dst, o->out);
3748 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
/* CC: nonzero test on the full 64-bit result.  */
3751 static void cout_nz64(DisasContext *s, DisasOps *o)
3753 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
/* CC: signed comparison of the 32-bit result against zero.  */
3756 static void cout_s32(DisasContext *s, DisasOps *o)
3758 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
/* CC: signed comparison of the 64-bit result against zero.  */
3761 static void cout_s64(DisasContext *s, DisasOps *o)
3763 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
/* CC from signed 32-bit subtraction.  */
3766 static void cout_subs32(DisasContext *s, DisasOps *o)
3768 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
/* CC from signed 64-bit subtraction.  */
3771 static void cout_subs64(DisasContext *s, DisasOps *o)
3773 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
/* CC from unsigned 32-bit subtraction.  */
3776 static void cout_subu32(DisasContext *s, DisasOps *o)
3778 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3781 static void cout_subu64(DisasContext *s, DisasOps *o)
3783 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3786 static void cout_subb32(DisasContext *s, DisasOps *o)
3788 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3791 static void cout_subb64(DisasContext *s, DisasOps *o)
3793 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3796 static void cout_tm32(DisasContext *s, DisasOps *o)
3798 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3801 static void cout_tm64(DisasContext *s, DisasOps *o)
3803 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3806 /* ====================================================================== */
3807 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3808 with the TCG register to which we will write. Used in combination with
3809 the "wout" generators, in some cases we need a new temporary, and in
3810 some cases we can write to a TCG global. */
3812 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3814 o->out = tcg_temp_new_i64();
3816 #define SPEC_prep_new 0
3818 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3820 o->out = tcg_temp_new_i64();
3821 o->out2 = tcg_temp_new_i64();
3823 #define SPEC_prep_new_P 0
3825 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3827 o->out = regs[get_field(f, r1)];
3828 o->g_out = true;
3830 #define SPEC_prep_r1 0
3832 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3834 int r1 = get_field(f, r1);
3835 o->out = regs[r1];
3836 o->out2 = regs[r1 + 1];
3837 o->g_out = o->g_out2 = true;
3839 #define SPEC_prep_r1_P SPEC_r1_even
3841 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3843 o->out = fregs[get_field(f, r1)];
3844 o->g_out = true;
3846 #define SPEC_prep_f1 0
3848 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3850 int r1 = get_field(f, r1);
3851 o->out = fregs[r1];
3852 o->out2 = fregs[r1 + 2];
3853 o->g_out = o->g_out2 = true;
3855 #define SPEC_prep_x1 SPEC_r1_f128
3857 /* ====================================================================== */
3858 /* The "Write OUTput" generators. These generally perform some non-trivial
3859 copy of data to TCG globals, or to main memory. The trivial cases are
3860 generally handled by having a "prep" generator install the TCG global
3861 as the destination of the operation. */
3863 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3865 store_reg(get_field(f, r1), o->out);
3867 #define SPEC_wout_r1 0
3869 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3871 int r1 = get_field(f, r1);
3872 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3874 #define SPEC_wout_r1_8 0
3876 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3878 int r1 = get_field(f, r1);
3879 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3881 #define SPEC_wout_r1_16 0
3883 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3885 store_reg32_i64(get_field(f, r1), o->out);
3887 #define SPEC_wout_r1_32 0
3889 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3891 int r1 = get_field(f, r1);
3892 store_reg32_i64(r1, o->out);
3893 store_reg32_i64(r1 + 1, o->out2);
3895 #define SPEC_wout_r1_P32 SPEC_r1_even
3897 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3899 int r1 = get_field(f, r1);
3900 store_reg32_i64(r1 + 1, o->out);
3901 tcg_gen_shri_i64(o->out, o->out, 32);
3902 store_reg32_i64(r1, o->out);
3904 #define SPEC_wout_r1_D32 SPEC_r1_even
3906 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3908 store_freg32_i64(get_field(f, r1), o->out);
3910 #define SPEC_wout_e1 0
3912 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3914 store_freg(get_field(f, r1), o->out);
3916 #define SPEC_wout_f1 0
3918 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3920 int f1 = get_field(s->fields, r1);
3921 store_freg(f1, o->out);
3922 store_freg(f1 + 2, o->out2);
3924 #define SPEC_wout_x1 SPEC_r1_f128
3926 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3928 if (get_field(f, r1) != get_field(f, r2)) {
3929 store_reg32_i64(get_field(f, r1), o->out);
3932 #define SPEC_wout_cond_r1r2_32 0
3934 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3936 if (get_field(f, r1) != get_field(f, r2)) {
3937 store_freg32_i64(get_field(f, r1), o->out);
3940 #define SPEC_wout_cond_e1e2 0
3942 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3944 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3946 #define SPEC_wout_m1_8 0
3948 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3950 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3952 #define SPEC_wout_m1_16 0
3954 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3956 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3958 #define SPEC_wout_m1_32 0
3960 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3962 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3964 #define SPEC_wout_m1_64 0
3966 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3968 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3970 #define SPEC_wout_m2_32 0
3972 /* ====================================================================== */
3973 /* The "INput 1" generators. These load the first operand to an insn. */
3975 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3977 o->in1 = load_reg(get_field(f, r1));
3979 #define SPEC_in1_r1 0
3981 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3983 o->in1 = regs[get_field(f, r1)];
3984 o->g_in1 = true;
3986 #define SPEC_in1_r1_o 0
3988 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3990 o->in1 = tcg_temp_new_i64();
3991 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3993 #define SPEC_in1_r1_32s 0
3995 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3997 o->in1 = tcg_temp_new_i64();
3998 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4000 #define SPEC_in1_r1_32u 0
4002 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4004 o->in1 = tcg_temp_new_i64();
4005 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4007 #define SPEC_in1_r1_sr32 0
4009 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4011 o->in1 = load_reg(get_field(f, r1) + 1);
4013 #define SPEC_in1_r1p1 SPEC_r1_even
4015 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4017 o->in1 = tcg_temp_new_i64();
4018 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4020 #define SPEC_in1_r1p1_32s SPEC_r1_even
4022 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4024 o->in1 = tcg_temp_new_i64();
4025 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4027 #define SPEC_in1_r1p1_32u SPEC_r1_even
4029 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4031 int r1 = get_field(f, r1);
4032 o->in1 = tcg_temp_new_i64();
4033 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4035 #define SPEC_in1_r1_D32 SPEC_r1_even
4037 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4039 o->in1 = load_reg(get_field(f, r2));
4041 #define SPEC_in1_r2 0
4043 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4045 o->in1 = load_reg(get_field(f, r3));
4047 #define SPEC_in1_r3 0
4049 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4051 o->in1 = regs[get_field(f, r3)];
4052 o->g_in1 = true;
4054 #define SPEC_in1_r3_o 0
4056 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4058 o->in1 = tcg_temp_new_i64();
4059 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4061 #define SPEC_in1_r3_32s 0
4063 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4065 o->in1 = tcg_temp_new_i64();
4066 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4068 #define SPEC_in1_r3_32u 0
4070 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4072 int r3 = get_field(f, r3);
4073 o->in1 = tcg_temp_new_i64();
4074 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4076 #define SPEC_in1_r3_D32 SPEC_r3_even
4078 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4080 o->in1 = load_freg32_i64(get_field(f, r1));
4082 #define SPEC_in1_e1 0
4084 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4086 o->in1 = fregs[get_field(f, r1)];
4087 o->g_in1 = true;
4089 #define SPEC_in1_f1_o 0
4091 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4093 int r1 = get_field(f, r1);
4094 o->out = fregs[r1];
4095 o->out2 = fregs[r1 + 2];
4096 o->g_out = o->g_out2 = true;
4098 #define SPEC_in1_x1_o SPEC_r1_f128
4100 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4102 o->in1 = fregs[get_field(f, r3)];
4103 o->g_in1 = true;
4105 #define SPEC_in1_f3_o 0
4107 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4109 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4111 #define SPEC_in1_la1 0
4113 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4115 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4116 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4118 #define SPEC_in1_la2 0
4120 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4122 in1_la1(s, f, o);
4123 o->in1 = tcg_temp_new_i64();
4124 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4126 #define SPEC_in1_m1_8u 0
4128 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4130 in1_la1(s, f, o);
4131 o->in1 = tcg_temp_new_i64();
4132 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4134 #define SPEC_in1_m1_16s 0
4136 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4138 in1_la1(s, f, o);
4139 o->in1 = tcg_temp_new_i64();
4140 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4142 #define SPEC_in1_m1_16u 0
4144 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4146 in1_la1(s, f, o);
4147 o->in1 = tcg_temp_new_i64();
4148 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4150 #define SPEC_in1_m1_32s 0
4152 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4154 in1_la1(s, f, o);
4155 o->in1 = tcg_temp_new_i64();
4156 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4158 #define SPEC_in1_m1_32u 0
4160 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4162 in1_la1(s, f, o);
4163 o->in1 = tcg_temp_new_i64();
4164 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4166 #define SPEC_in1_m1_64 0
4168 /* ====================================================================== */
4169 /* The "INput 2" generators. These load the second operand to an insn. */
4171 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4173 o->in2 = regs[get_field(f, r1)];
4174 o->g_in2 = true;
4176 #define SPEC_in2_r1_o 0
4178 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4180 o->in2 = tcg_temp_new_i64();
4181 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4183 #define SPEC_in2_r1_16u 0
4185 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4187 o->in2 = tcg_temp_new_i64();
4188 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4190 #define SPEC_in2_r1_32u 0
4192 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4194 int r1 = get_field(f, r1);
4195 o->in2 = tcg_temp_new_i64();
4196 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4198 #define SPEC_in2_r1_D32 SPEC_r1_even
4200 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4202 o->in2 = load_reg(get_field(f, r2));
4204 #define SPEC_in2_r2 0
4206 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4208 o->in2 = regs[get_field(f, r2)];
4209 o->g_in2 = true;
4211 #define SPEC_in2_r2_o 0
4213 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4215 int r2 = get_field(f, r2);
4216 if (r2 != 0) {
4217 o->in2 = load_reg(r2);
4220 #define SPEC_in2_r2_nz 0
4222 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4224 o->in2 = tcg_temp_new_i64();
4225 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4227 #define SPEC_in2_r2_8s 0
4229 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4231 o->in2 = tcg_temp_new_i64();
4232 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4234 #define SPEC_in2_r2_8u 0
4236 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4238 o->in2 = tcg_temp_new_i64();
4239 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4241 #define SPEC_in2_r2_16s 0
4243 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4245 o->in2 = tcg_temp_new_i64();
4246 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4248 #define SPEC_in2_r2_16u 0
4250 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4252 o->in2 = load_reg(get_field(f, r3));
4254 #define SPEC_in2_r3 0
4256 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4258 o->in2 = tcg_temp_new_i64();
4259 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4261 #define SPEC_in2_r2_32s 0
4263 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4265 o->in2 = tcg_temp_new_i64();
4266 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4268 #define SPEC_in2_r2_32u 0
4270 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4272 o->in2 = load_freg32_i64(get_field(f, r2));
4274 #define SPEC_in2_e2 0
4276 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4278 o->in2 = fregs[get_field(f, r2)];
4279 o->g_in2 = true;
4281 #define SPEC_in2_f2_o 0
4283 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4285 int r2 = get_field(f, r2);
4286 o->in1 = fregs[r2];
4287 o->in2 = fregs[r2 + 2];
4288 o->g_in1 = o->g_in2 = true;
4290 #define SPEC_in2_x2_o SPEC_r2_f128
4292 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4294 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4296 #define SPEC_in2_ra2 0
4298 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4300 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4301 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4303 #define SPEC_in2_a2 0
4305 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4307 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4309 #define SPEC_in2_ri2 0
4311 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4313 help_l2_shift(s, f, o, 31);
4315 #define SPEC_in2_sh32 0
4317 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4319 help_l2_shift(s, f, o, 63);
4321 #define SPEC_in2_sh64 0
4323 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4325 in2_a2(s, f, o);
4326 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4328 #define SPEC_in2_m2_8u 0
4330 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4332 in2_a2(s, f, o);
4333 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4335 #define SPEC_in2_m2_16s 0
4337 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4339 in2_a2(s, f, o);
4340 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4342 #define SPEC_in2_m2_16u 0
4344 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4346 in2_a2(s, f, o);
4347 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4349 #define SPEC_in2_m2_32s 0
4351 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4353 in2_a2(s, f, o);
4354 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4356 #define SPEC_in2_m2_32u 0
4358 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4360 in2_a2(s, f, o);
4361 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4363 #define SPEC_in2_m2_64 0
4365 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4367 in2_ri2(s, f, o);
4368 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4370 #define SPEC_in2_mri2_16u 0
4372 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4374 in2_ri2(s, f, o);
4375 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4377 #define SPEC_in2_mri2_32s 0
4379 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4381 in2_ri2(s, f, o);
4382 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4384 #define SPEC_in2_mri2_32u 0
4386 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4388 in2_ri2(s, f, o);
4389 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4391 #define SPEC_in2_mri2_64 0
4393 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4395 o->in2 = tcg_const_i64(get_field(f, i2));
4397 #define SPEC_in2_i2 0
4399 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4401 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4403 #define SPEC_in2_i2_8u 0
4405 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4407 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4409 #define SPEC_in2_i2_16u 0
4411 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4413 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4415 #define SPEC_in2_i2_32u 0
4417 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4419 uint64_t i2 = (uint16_t)get_field(f, i2);
4420 o->in2 = tcg_const_i64(i2 << s->insn->data);
4422 #define SPEC_in2_i2_16u_shl 0
4424 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4426 uint64_t i2 = (uint32_t)get_field(f, i2);
4427 o->in2 = tcg_const_i64(i2 << s->insn->data);
4429 #define SPEC_in2_i2_32u_shl 0
4431 /* ====================================================================== */
4433 /* Find opc within the table of insns. This is formulated as a switch
4434 statement so that (1) we get compile-time notice of cut-paste errors
4435 for duplicated opcodes, and (2) the compiler generates the binary
4436 search tree, rather than us having to post-process the table. */
4438 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4439 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4441 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4443 enum DisasInsnEnum {
4444 #include "insn-data.def"
4447 #undef D
4448 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4449 .opc = OPC, \
4450 .fmt = FMT_##FT, \
4451 .fac = FAC_##FC, \
4452 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4453 .name = #NM, \
4454 .help_in1 = in1_##I1, \
4455 .help_in2 = in2_##I2, \
4456 .help_prep = prep_##P, \
4457 .help_wout = wout_##W, \
4458 .help_cout = cout_##CC, \
4459 .help_op = op_##OP, \
4460 .data = D \
4463 /* Allow 0 to be used for NULL in the table below. */
4464 #define in1_0 NULL
4465 #define in2_0 NULL
4466 #define prep_0 NULL
4467 #define wout_0 NULL
4468 #define cout_0 NULL
4469 #define op_0 NULL
4471 #define SPEC_in1_0 0
4472 #define SPEC_in2_0 0
4473 #define SPEC_prep_0 0
4474 #define SPEC_wout_0 0
4476 static const DisasInsn insn_info[] = {
4477 #include "insn-data.def"
4480 #undef D
4481 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4482 case OPC: return &insn_info[insn_ ## NM];
4484 static const DisasInsn *lookup_opc(uint16_t opc)
4486 switch (opc) {
4487 #include "insn-data.def"
4488 default:
4489 return NULL;
4493 #undef D
4494 #undef C
4496 /* Extract a field from the insn. The INSN should be left-aligned in
4497 the uint64_t so that we can more easily utilize the big-bit-endian
4498 definitions we extract from the Principals of Operation. */
4500 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4502 uint32_t r, m;
4504 if (f->size == 0) {
4505 return;
4508 /* Zero extract the field from the insn. */
4509 r = (insn << f->beg) >> (64 - f->size);
4511 /* Sign-extend, or un-swap the field as necessary. */
4512 switch (f->type) {
4513 case 0: /* unsigned */
4514 break;
4515 case 1: /* signed */
4516 assert(f->size <= 32);
4517 m = 1u << (f->size - 1);
4518 r = (r ^ m) - m;
4519 break;
4520 case 2: /* dl+dh split, signed 20 bit. */
4521 r = ((int8_t)r << 12) | (r >> 8);
4522 break;
4523 default:
4524 abort();
4527 /* Validate that the "compressed" encoding we selected above is valid.
4528 I.e. we havn't make two different original fields overlap. */
4529 assert(((o->presentC >> f->indexC) & 1) == 0);
4530 o->presentC |= 1 << f->indexC;
4531 o->presentO |= 1 << f->indexO;
4533 o->c[f->indexC] = r;
4536 /* Lookup the insn at the current PC, extracting the operands into O and
4537 returning the info struct for the insn. Returns NULL for invalid insn. */
4539 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4540 DisasFields *f)
4542 uint64_t insn, pc = s->pc;
4543 int op, op2, ilen;
4544 const DisasInsn *info;
4546 insn = ld_code2(env, pc);
4547 op = (insn >> 8) & 0xff;
4548 ilen = get_ilen(op);
4549 s->next_pc = s->pc + ilen;
4551 switch (ilen) {
4552 case 2:
4553 insn = insn << 48;
4554 break;
4555 case 4:
4556 insn = ld_code4(env, pc) << 32;
4557 break;
4558 case 6:
4559 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4560 break;
4561 default:
4562 abort();
4565 /* We can't actually determine the insn format until we've looked up
4566 the full insn opcode. Which we can't do without locating the
4567 secondary opcode. Assume by default that OP2 is at bit 40; for
4568 those smaller insns that don't actually have a secondary opcode
4569 this will correctly result in OP2 = 0. */
4570 switch (op) {
4571 case 0x01: /* E */
4572 case 0x80: /* S */
4573 case 0x82: /* S */
4574 case 0x93: /* S */
4575 case 0xb2: /* S, RRF, RRE */
4576 case 0xb3: /* RRE, RRD, RRF */
4577 case 0xb9: /* RRE, RRF */
4578 case 0xe5: /* SSE, SIL */
4579 op2 = (insn << 8) >> 56;
4580 break;
4581 case 0xa5: /* RI */
4582 case 0xa7: /* RI */
4583 case 0xc0: /* RIL */
4584 case 0xc2: /* RIL */
4585 case 0xc4: /* RIL */
4586 case 0xc6: /* RIL */
4587 case 0xc8: /* SSF */
4588 case 0xcc: /* RIL */
4589 op2 = (insn << 12) >> 60;
4590 break;
4591 case 0xd0 ... 0xdf: /* SS */
4592 case 0xe1: /* SS */
4593 case 0xe2: /* SS */
4594 case 0xe8: /* SS */
4595 case 0xe9: /* SS */
4596 case 0xea: /* SS */
4597 case 0xee ... 0xf3: /* SS */
4598 case 0xf8 ... 0xfd: /* SS */
4599 op2 = 0;
4600 break;
4601 default:
4602 op2 = (insn << 40) >> 56;
4603 break;
4606 memset(f, 0, sizeof(*f));
4607 f->op = op;
4608 f->op2 = op2;
4610 /* Lookup the instruction. */
4611 info = lookup_opc(op << 8 | op2);
4613 /* If we found it, extract the operands. */
4614 if (info != NULL) {
4615 DisasFormat fmt = info->fmt;
4616 int i;
4618 for (i = 0; i < NUM_C_FIELD; ++i) {
4619 extract_field(f, &format_info[fmt].op[i], insn);
4622 return info;
4625 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4627 const DisasInsn *insn;
4628 ExitStatus ret = NO_EXIT;
4629 DisasFields f;
4630 DisasOps o;
4632 /* Search for the insn in the table. */
4633 insn = extract_insn(env, s, &f);
4635 /* Not found means unimplemented/illegal opcode. */
4636 if (insn == NULL) {
4637 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
4638 f.op, f.op2);
4639 gen_illegal_opcode(s);
4640 return EXIT_NORETURN;
4643 /* Check for insn specification exceptions. */
4644 if (insn->spec) {
4645 int spec = insn->spec, excp = 0, r;
4647 if (spec & SPEC_r1_even) {
4648 r = get_field(&f, r1);
4649 if (r & 1) {
4650 excp = PGM_SPECIFICATION;
4653 if (spec & SPEC_r2_even) {
4654 r = get_field(&f, r2);
4655 if (r & 1) {
4656 excp = PGM_SPECIFICATION;
4659 if (spec & SPEC_r3_even) {
4660 r = get_field(&f, r3);
4661 if (r & 1) {
4662 excp = PGM_SPECIFICATION;
4665 if (spec & SPEC_r1_f128) {
4666 r = get_field(&f, r1);
4667 if (r > 13) {
4668 excp = PGM_SPECIFICATION;
4671 if (spec & SPEC_r2_f128) {
4672 r = get_field(&f, r2);
4673 if (r > 13) {
4674 excp = PGM_SPECIFICATION;
4677 if (excp) {
4678 gen_program_exception(s, excp);
4679 return EXIT_NORETURN;
4683 /* Set up the strutures we use to communicate with the helpers. */
4684 s->insn = insn;
4685 s->fields = &f;
4686 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4687 TCGV_UNUSED_I64(o.out);
4688 TCGV_UNUSED_I64(o.out2);
4689 TCGV_UNUSED_I64(o.in1);
4690 TCGV_UNUSED_I64(o.in2);
4691 TCGV_UNUSED_I64(o.addr1);
4693 /* Implement the instruction. */
4694 if (insn->help_in1) {
4695 insn->help_in1(s, &f, &o);
4697 if (insn->help_in2) {
4698 insn->help_in2(s, &f, &o);
4700 if (insn->help_prep) {
4701 insn->help_prep(s, &f, &o);
4703 if (insn->help_op) {
4704 ret = insn->help_op(s, &o);
4706 if (insn->help_wout) {
4707 insn->help_wout(s, &f, &o);
4709 if (insn->help_cout) {
4710 insn->help_cout(s, &o);
4713 /* Free any temporaries created by the helpers. */
4714 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4715 tcg_temp_free_i64(o.out);
4717 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4718 tcg_temp_free_i64(o.out2);
4720 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4721 tcg_temp_free_i64(o.in1);
4723 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4724 tcg_temp_free_i64(o.in2);
4726 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4727 tcg_temp_free_i64(o.addr1);
4730 /* Advance to the next instruction. */
4731 s->pc = s->next_pc;
4732 return ret;
4735 static inline void gen_intermediate_code_internal(S390CPU *cpu,
4736 TranslationBlock *tb,
4737 bool search_pc)
4739 CPUState *cs = CPU(cpu);
4740 CPUS390XState *env = &cpu->env;
4741 DisasContext dc;
4742 target_ulong pc_start;
4743 uint64_t next_page_start;
4744 uint16_t *gen_opc_end;
4745 int j, lj = -1;
4746 int num_insns, max_insns;
4747 CPUBreakpoint *bp;
4748 ExitStatus status;
4749 bool do_debug;
4751 pc_start = tb->pc;
4753 /* 31-bit mode */
4754 if (!(tb->flags & FLAG_MASK_64)) {
4755 pc_start &= 0x7fffffff;
4758 dc.tb = tb;
4759 dc.pc = pc_start;
4760 dc.cc_op = CC_OP_DYNAMIC;
4761 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
4763 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4765 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4767 num_insns = 0;
4768 max_insns = tb->cflags & CF_COUNT_MASK;
4769 if (max_insns == 0) {
4770 max_insns = CF_COUNT_MASK;
4773 gen_tb_start();
4775 do {
4776 if (search_pc) {
4777 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4778 if (lj < j) {
4779 lj++;
4780 while (lj < j) {
4781 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4784 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4785 gen_opc_cc_op[lj] = dc.cc_op;
4786 tcg_ctx.gen_opc_instr_start[lj] = 1;
4787 tcg_ctx.gen_opc_icount[lj] = num_insns;
4789 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4790 gen_io_start();
4793 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4794 tcg_gen_debug_insn_start(dc.pc);
4797 status = NO_EXIT;
4798 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
4799 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
4800 if (bp->pc == dc.pc) {
4801 status = EXIT_PC_STALE;
4802 do_debug = true;
4803 break;
4807 if (status == NO_EXIT) {
4808 status = translate_one(env, &dc);
4811 /* If we reach a page boundary, are single stepping,
4812 or exhaust instruction count, stop generation. */
4813 if (status == NO_EXIT
4814 && (dc.pc >= next_page_start
4815 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4816 || num_insns >= max_insns
4817 || singlestep
4818 || cs->singlestep_enabled)) {
4819 status = EXIT_PC_STALE;
4821 } while (status == NO_EXIT);
4823 if (tb->cflags & CF_LAST_IO) {
4824 gen_io_end();
4827 switch (status) {
4828 case EXIT_GOTO_TB:
4829 case EXIT_NORETURN:
4830 break;
4831 case EXIT_PC_STALE:
4832 update_psw_addr(&dc);
4833 /* FALLTHRU */
4834 case EXIT_PC_UPDATED:
4835 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4836 cc op type is in env */
4837 update_cc_op(&dc);
4838 /* Exit the TB, either by raising a debug exception or by return. */
4839 if (do_debug) {
4840 gen_exception(EXCP_DEBUG);
4841 } else {
4842 tcg_gen_exit_tb(0);
4844 break;
4845 default:
4846 abort();
4849 gen_tb_end(tb, num_insns);
4850 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4851 if (search_pc) {
4852 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4853 lj++;
4854 while (lj <= j) {
4855 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4857 } else {
4858 tb->size = dc.pc - pc_start;
4859 tb->icount = num_insns;
4862 #if defined(S390X_DEBUG_DISAS)
4863 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4864 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4865 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4866 qemu_log("\n");
4868 #endif
4871 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4873 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
4876 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4878 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
4881 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4883 int cc_op;
4884 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4885 cc_op = gen_opc_cc_op[pc_pos];
4886 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4887 env->cc_op = cc_op;