qemu-coroutine-io: fix for Win32
[qemu/ar7.git] / target-s390x / translate.c
blob0cb036f667c2d1f9b7c52687c0ec531c159277b9
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t pc, next_pc;
58 enum cc_op cc_op;
59 bool singlestep_enabled;
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;
65 bool is_64;
66 bool g1;
67 bool g2;
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
74 #define DISAS_EXCP 4
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
88 return pc;
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
124 #ifndef CONFIG_USER_ONLY
125 for (i = 0; i < 16; i++) {
126 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
127 if ((i % 4) == 3) {
128 cpu_fprintf(f, "\n");
129 } else {
130 cpu_fprintf(f, " ");
133 #endif
135 #ifdef DEBUG_INLINE_BRANCHES
136 for (i = 0; i < CC_OP_MAX; i++) {
137 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
138 inline_branch_miss[i], inline_branch_hit[i]);
140 #endif
142 cpu_fprintf(f, "\n");
145 static TCGv_i64 psw_addr;
146 static TCGv_i64 psw_mask;
148 static TCGv_i32 cc_op;
149 static TCGv_i64 cc_src;
150 static TCGv_i64 cc_dst;
151 static TCGv_i64 cc_vr;
153 static char cpu_reg_names[32][4];
154 static TCGv_i64 regs[16];
155 static TCGv_i64 fregs[16];
157 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
159 void s390x_translate_init(void)
161 int i;
163 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
164 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
165 offsetof(CPUS390XState, psw.addr),
166 "psw_addr");
167 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
168 offsetof(CPUS390XState, psw.mask),
169 "psw_mask");
171 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
172 "cc_op");
173 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
174 "cc_src");
175 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
176 "cc_dst");
177 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
178 "cc_vr");
180 for (i = 0; i < 16; i++) {
181 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
182 regs[i] = tcg_global_mem_new(TCG_AREG0,
183 offsetof(CPUS390XState, regs[i]),
184 cpu_reg_names[i]);
187 for (i = 0; i < 16; i++) {
188 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
189 fregs[i] = tcg_global_mem_new(TCG_AREG0,
190 offsetof(CPUS390XState, fregs[i].d),
191 cpu_reg_names[i + 16]);
195 static TCGv_i64 load_reg(int reg)
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
202 static TCGv_i64 load_freg32_i64(int reg)
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
209 static void store_reg(int reg, TCGv_i64 v)
211 tcg_gen_mov_i64(regs[reg], v);
214 static void store_freg(int reg, TCGv_i64 v)
216 tcg_gen_mov_i64(fregs[reg], v);
219 static void store_reg32_i64(int reg, TCGv_i64 v)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
225 static void store_reg32h_i64(int reg, TCGv_i64 v)
227 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
230 static void store_freg32_i64(int reg, TCGv_i64 v)
232 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
235 static void return_low128(TCGv_i64 dest)
237 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
240 static void update_psw_addr(DisasContext *s)
242 /* psw.addr */
243 tcg_gen_movi_i64(psw_addr, s->pc);
246 static void update_cc_op(DisasContext *s)
248 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
249 tcg_gen_movi_i32(cc_op, s->cc_op);
253 static void potential_page_fault(DisasContext *s)
255 update_psw_addr(s);
256 update_cc_op(s);
259 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
261 return (uint64_t)cpu_lduw_code(env, pc);
264 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
269 static int get_mem_index(DisasContext *s)
271 switch (s->tb->flags & FLAG_MASK_ASC) {
272 case PSW_ASC_PRIMARY >> 32:
273 return 0;
274 case PSW_ASC_SECONDARY >> 32:
275 return 1;
276 case PSW_ASC_HOME >> 32:
277 return 2;
278 default:
279 tcg_abort();
280 break;
284 static void gen_exception(int excp)
286 TCGv_i32 tmp = tcg_const_i32(excp);
287 gen_helper_exception(cpu_env, tmp);
288 tcg_temp_free_i32(tmp);
291 static void gen_program_exception(DisasContext *s, int code)
293 TCGv_i32 tmp;
295 /* Remember what pgm exeption this was. */
296 tmp = tcg_const_i32(code);
297 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
298 tcg_temp_free_i32(tmp);
300 tmp = tcg_const_i32(s->next_pc - s->pc);
301 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
302 tcg_temp_free_i32(tmp);
304 /* Advance past instruction. */
305 s->pc = s->next_pc;
306 update_psw_addr(s);
308 /* Save off cc. */
309 update_cc_op(s);
311 /* Trigger exception. */
312 gen_exception(EXCP_PGM);
315 static inline void gen_illegal_opcode(DisasContext *s)
317 gen_program_exception(s, PGM_SPECIFICATION);
320 static inline void check_privileged(DisasContext *s)
322 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
323 gen_program_exception(s, PGM_PRIVILEGED);
327 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
329 TCGv_i64 tmp = tcg_temp_new_i64();
330 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
332 /* Note that d2 is limited to 20 bits, signed. If we crop negative
333 displacements early we create larger immedate addends. */
335 /* Note that addi optimizes the imm==0 case. */
336 if (b2 && x2) {
337 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
338 tcg_gen_addi_i64(tmp, tmp, d2);
339 } else if (b2) {
340 tcg_gen_addi_i64(tmp, regs[b2], d2);
341 } else if (x2) {
342 tcg_gen_addi_i64(tmp, regs[x2], d2);
343 } else {
344 if (need_31) {
345 d2 &= 0x7fffffff;
346 need_31 = false;
348 tcg_gen_movi_i64(tmp, d2);
350 if (need_31) {
351 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
354 return tmp;
357 static inline bool live_cc_data(DisasContext *s)
359 return (s->cc_op != CC_OP_DYNAMIC
360 && s->cc_op != CC_OP_STATIC
361 && s->cc_op > 3);
364 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
366 if (live_cc_data(s)) {
367 tcg_gen_discard_i64(cc_src);
368 tcg_gen_discard_i64(cc_dst);
369 tcg_gen_discard_i64(cc_vr);
371 s->cc_op = CC_OP_CONST0 + val;
374 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
376 if (live_cc_data(s)) {
377 tcg_gen_discard_i64(cc_src);
378 tcg_gen_discard_i64(cc_vr);
380 tcg_gen_mov_i64(cc_dst, dst);
381 s->cc_op = op;
384 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
385 TCGv_i64 dst)
387 if (live_cc_data(s)) {
388 tcg_gen_discard_i64(cc_vr);
390 tcg_gen_mov_i64(cc_src, src);
391 tcg_gen_mov_i64(cc_dst, dst);
392 s->cc_op = op;
395 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
396 TCGv_i64 dst, TCGv_i64 vr)
398 tcg_gen_mov_i64(cc_src, src);
399 tcg_gen_mov_i64(cc_dst, dst);
400 tcg_gen_mov_i64(cc_vr, vr);
401 s->cc_op = op;
404 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
406 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
409 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
411 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
414 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
416 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
419 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
421 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
424 /* CC value is in env->cc_op */
425 static void set_cc_static(DisasContext *s)
427 if (live_cc_data(s)) {
428 tcg_gen_discard_i64(cc_src);
429 tcg_gen_discard_i64(cc_dst);
430 tcg_gen_discard_i64(cc_vr);
432 s->cc_op = CC_OP_STATIC;
435 /* calculates cc into cc_op */
436 static void gen_op_calc_cc(DisasContext *s)
438 TCGv_i32 local_cc_op;
439 TCGv_i64 dummy;
441 TCGV_UNUSED_I32(local_cc_op);
442 TCGV_UNUSED_I64(dummy);
443 switch (s->cc_op) {
444 default:
445 dummy = tcg_const_i64(0);
446 /* FALLTHRU */
447 case CC_OP_ADD_64:
448 case CC_OP_ADDU_64:
449 case CC_OP_ADDC_64:
450 case CC_OP_SUB_64:
451 case CC_OP_SUBU_64:
452 case CC_OP_SUBB_64:
453 case CC_OP_ADD_32:
454 case CC_OP_ADDU_32:
455 case CC_OP_ADDC_32:
456 case CC_OP_SUB_32:
457 case CC_OP_SUBU_32:
458 case CC_OP_SUBB_32:
459 local_cc_op = tcg_const_i32(s->cc_op);
460 break;
461 case CC_OP_CONST0:
462 case CC_OP_CONST1:
463 case CC_OP_CONST2:
464 case CC_OP_CONST3:
465 case CC_OP_STATIC:
466 case CC_OP_DYNAMIC:
467 break;
470 switch (s->cc_op) {
471 case CC_OP_CONST0:
472 case CC_OP_CONST1:
473 case CC_OP_CONST2:
474 case CC_OP_CONST3:
475 /* s->cc_op is the cc value */
476 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
477 break;
478 case CC_OP_STATIC:
479 /* env->cc_op already is the cc value */
480 break;
481 case CC_OP_NZ:
482 case CC_OP_ABS_64:
483 case CC_OP_NABS_64:
484 case CC_OP_ABS_32:
485 case CC_OP_NABS_32:
486 case CC_OP_LTGT0_32:
487 case CC_OP_LTGT0_64:
488 case CC_OP_COMP_32:
489 case CC_OP_COMP_64:
490 case CC_OP_NZ_F32:
491 case CC_OP_NZ_F64:
492 case CC_OP_FLOGR:
493 /* 1 argument */
494 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
495 break;
496 case CC_OP_ICM:
497 case CC_OP_LTGT_32:
498 case CC_OP_LTGT_64:
499 case CC_OP_LTUGTU_32:
500 case CC_OP_LTUGTU_64:
501 case CC_OP_TM_32:
502 case CC_OP_TM_64:
503 case CC_OP_SLA_32:
504 case CC_OP_SLA_64:
505 case CC_OP_NZ_F128:
506 /* 2 arguments */
507 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
508 break;
509 case CC_OP_ADD_64:
510 case CC_OP_ADDU_64:
511 case CC_OP_ADDC_64:
512 case CC_OP_SUB_64:
513 case CC_OP_SUBU_64:
514 case CC_OP_SUBB_64:
515 case CC_OP_ADD_32:
516 case CC_OP_ADDU_32:
517 case CC_OP_ADDC_32:
518 case CC_OP_SUB_32:
519 case CC_OP_SUBU_32:
520 case CC_OP_SUBB_32:
521 /* 3 arguments */
522 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
523 break;
524 case CC_OP_DYNAMIC:
525 /* unknown operation - assume 3 arguments and cc_op in env */
526 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
527 break;
528 default:
529 tcg_abort();
532 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
533 tcg_temp_free_i32(local_cc_op);
535 if (!TCGV_IS_UNUSED_I64(dummy)) {
536 tcg_temp_free_i64(dummy);
539 /* We now have cc in cc_op as constant */
540 set_cc_static(s);
543 static int use_goto_tb(DisasContext *s, uint64_t dest)
545 /* NOTE: we handle the case where the TB spans two pages here */
546 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
547 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
548 && !s->singlestep_enabled
549 && !(s->tb->cflags & CF_LAST_IO));
552 static void account_noninline_branch(DisasContext *s, int cc_op)
554 #ifdef DEBUG_INLINE_BRANCHES
555 inline_branch_miss[cc_op]++;
556 #endif
559 static void account_inline_branch(DisasContext *s, int cc_op)
561 #ifdef DEBUG_INLINE_BRANCHES
562 inline_branch_hit[cc_op]++;
563 #endif
566 /* Table of mask values to comparison codes, given a comparison as input.
567 For such, CC=3 should not be possible. */
568 static const TCGCond ltgt_cond[16] = {
569 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
570 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
571 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
572 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
573 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
574 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
575 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
576 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
579 /* Table of mask values to comparison codes, given a logic op as input.
580 For such, only CC=0 and CC=1 should be possible. */
581 static const TCGCond nz_cond[16] = {
582 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
583 TCG_COND_NEVER, TCG_COND_NEVER,
584 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
585 TCG_COND_NE, TCG_COND_NE,
586 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
587 TCG_COND_EQ, TCG_COND_EQ,
588 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
589 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
592 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
593 details required to generate a TCG comparison. */
594 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
596 TCGCond cond;
597 enum cc_op old_cc_op = s->cc_op;
599 if (mask == 15 || mask == 0) {
600 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
601 c->u.s32.a = cc_op;
602 c->u.s32.b = cc_op;
603 c->g1 = c->g2 = true;
604 c->is_64 = false;
605 return;
608 /* Find the TCG condition for the mask + cc op. */
609 switch (old_cc_op) {
610 case CC_OP_LTGT0_32:
611 case CC_OP_LTGT0_64:
612 case CC_OP_LTGT_32:
613 case CC_OP_LTGT_64:
614 cond = ltgt_cond[mask];
615 if (cond == TCG_COND_NEVER) {
616 goto do_dynamic;
618 account_inline_branch(s, old_cc_op);
619 break;
621 case CC_OP_LTUGTU_32:
622 case CC_OP_LTUGTU_64:
623 cond = tcg_unsigned_cond(ltgt_cond[mask]);
624 if (cond == TCG_COND_NEVER) {
625 goto do_dynamic;
627 account_inline_branch(s, old_cc_op);
628 break;
630 case CC_OP_NZ:
631 cond = nz_cond[mask];
632 if (cond == TCG_COND_NEVER) {
633 goto do_dynamic;
635 account_inline_branch(s, old_cc_op);
636 break;
638 case CC_OP_TM_32:
639 case CC_OP_TM_64:
640 switch (mask) {
641 case 8:
642 cond = TCG_COND_EQ;
643 break;
644 case 4 | 2 | 1:
645 cond = TCG_COND_NE;
646 break;
647 default:
648 goto do_dynamic;
650 account_inline_branch(s, old_cc_op);
651 break;
653 case CC_OP_ICM:
654 switch (mask) {
655 case 8:
656 cond = TCG_COND_EQ;
657 break;
658 case 4 | 2 | 1:
659 case 4 | 2:
660 cond = TCG_COND_NE;
661 break;
662 default:
663 goto do_dynamic;
665 account_inline_branch(s, old_cc_op);
666 break;
668 case CC_OP_FLOGR:
669 switch (mask & 0xa) {
670 case 8: /* src == 0 -> no one bit found */
671 cond = TCG_COND_EQ;
672 break;
673 case 2: /* src != 0 -> one bit found */
674 cond = TCG_COND_NE;
675 break;
676 default:
677 goto do_dynamic;
679 account_inline_branch(s, old_cc_op);
680 break;
682 case CC_OP_ADDU_32:
683 case CC_OP_ADDU_64:
684 switch (mask) {
685 case 8 | 2: /* vr == 0 */
686 cond = TCG_COND_EQ;
687 break;
688 case 4 | 1: /* vr != 0 */
689 cond = TCG_COND_NE;
690 break;
691 case 8 | 4: /* no carry -> vr >= src */
692 cond = TCG_COND_GEU;
693 break;
694 case 2 | 1: /* carry -> vr < src */
695 cond = TCG_COND_LTU;
696 break;
697 default:
698 goto do_dynamic;
700 account_inline_branch(s, old_cc_op);
701 break;
703 case CC_OP_SUBU_32:
704 case CC_OP_SUBU_64:
705 /* Note that CC=0 is impossible; treat it as dont-care. */
706 switch (mask & 7) {
707 case 2: /* zero -> op1 == op2 */
708 cond = TCG_COND_EQ;
709 break;
710 case 4 | 1: /* !zero -> op1 != op2 */
711 cond = TCG_COND_NE;
712 break;
713 case 4: /* borrow (!carry) -> op1 < op2 */
714 cond = TCG_COND_LTU;
715 break;
716 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
717 cond = TCG_COND_GEU;
718 break;
719 default:
720 goto do_dynamic;
722 account_inline_branch(s, old_cc_op);
723 break;
725 default:
726 do_dynamic:
727 /* Calculate cc value. */
728 gen_op_calc_cc(s);
729 /* FALLTHRU */
731 case CC_OP_STATIC:
732 /* Jump based on CC. We'll load up the real cond below;
733 the assignment here merely avoids a compiler warning. */
734 account_noninline_branch(s, old_cc_op);
735 old_cc_op = CC_OP_STATIC;
736 cond = TCG_COND_NEVER;
737 break;
740 /* Load up the arguments of the comparison. */
741 c->is_64 = true;
742 c->g1 = c->g2 = false;
743 switch (old_cc_op) {
744 case CC_OP_LTGT0_32:
745 c->is_64 = false;
746 c->u.s32.a = tcg_temp_new_i32();
747 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
748 c->u.s32.b = tcg_const_i32(0);
749 break;
750 case CC_OP_LTGT_32:
751 case CC_OP_LTUGTU_32:
752 case CC_OP_SUBU_32:
753 c->is_64 = false;
754 c->u.s32.a = tcg_temp_new_i32();
755 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
756 c->u.s32.b = tcg_temp_new_i32();
757 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
758 break;
760 case CC_OP_LTGT0_64:
761 case CC_OP_NZ:
762 case CC_OP_FLOGR:
763 c->u.s64.a = cc_dst;
764 c->u.s64.b = tcg_const_i64(0);
765 c->g1 = true;
766 break;
767 case CC_OP_LTGT_64:
768 case CC_OP_LTUGTU_64:
769 case CC_OP_SUBU_64:
770 c->u.s64.a = cc_src;
771 c->u.s64.b = cc_dst;
772 c->g1 = c->g2 = true;
773 break;
775 case CC_OP_TM_32:
776 case CC_OP_TM_64:
777 case CC_OP_ICM:
778 c->u.s64.a = tcg_temp_new_i64();
779 c->u.s64.b = tcg_const_i64(0);
780 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
781 break;
783 case CC_OP_ADDU_32:
784 c->is_64 = false;
785 c->u.s32.a = tcg_temp_new_i32();
786 c->u.s32.b = tcg_temp_new_i32();
787 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
788 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
789 tcg_gen_movi_i32(c->u.s32.b, 0);
790 } else {
791 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
793 break;
795 case CC_OP_ADDU_64:
796 c->u.s64.a = cc_vr;
797 c->g1 = true;
798 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
799 c->u.s64.b = tcg_const_i64(0);
800 } else {
801 c->u.s64.b = cc_src;
802 c->g2 = true;
804 break;
806 case CC_OP_STATIC:
807 c->is_64 = false;
808 c->u.s32.a = cc_op;
809 c->g1 = true;
810 switch (mask) {
811 case 0x8 | 0x4 | 0x2: /* cc != 3 */
812 cond = TCG_COND_NE;
813 c->u.s32.b = tcg_const_i32(3);
814 break;
815 case 0x8 | 0x4 | 0x1: /* cc != 2 */
816 cond = TCG_COND_NE;
817 c->u.s32.b = tcg_const_i32(2);
818 break;
819 case 0x8 | 0x2 | 0x1: /* cc != 1 */
820 cond = TCG_COND_NE;
821 c->u.s32.b = tcg_const_i32(1);
822 break;
823 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
824 cond = TCG_COND_EQ;
825 c->g1 = false;
826 c->u.s32.a = tcg_temp_new_i32();
827 c->u.s32.b = tcg_const_i32(0);
828 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
829 break;
830 case 0x8 | 0x4: /* cc < 2 */
831 cond = TCG_COND_LTU;
832 c->u.s32.b = tcg_const_i32(2);
833 break;
834 case 0x8: /* cc == 0 */
835 cond = TCG_COND_EQ;
836 c->u.s32.b = tcg_const_i32(0);
837 break;
838 case 0x4 | 0x2 | 0x1: /* cc != 0 */
839 cond = TCG_COND_NE;
840 c->u.s32.b = tcg_const_i32(0);
841 break;
842 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
843 cond = TCG_COND_NE;
844 c->g1 = false;
845 c->u.s32.a = tcg_temp_new_i32();
846 c->u.s32.b = tcg_const_i32(0);
847 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
848 break;
849 case 0x4: /* cc == 1 */
850 cond = TCG_COND_EQ;
851 c->u.s32.b = tcg_const_i32(1);
852 break;
853 case 0x2 | 0x1: /* cc > 1 */
854 cond = TCG_COND_GTU;
855 c->u.s32.b = tcg_const_i32(1);
856 break;
857 case 0x2: /* cc == 2 */
858 cond = TCG_COND_EQ;
859 c->u.s32.b = tcg_const_i32(2);
860 break;
861 case 0x1: /* cc == 3 */
862 cond = TCG_COND_EQ;
863 c->u.s32.b = tcg_const_i32(3);
864 break;
865 default:
866 /* CC is masked by something else: (8 >> cc) & mask. */
867 cond = TCG_COND_NE;
868 c->g1 = false;
869 c->u.s32.a = tcg_const_i32(8);
870 c->u.s32.b = tcg_const_i32(0);
871 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
872 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
873 break;
875 break;
877 default:
878 abort();
880 c->cond = cond;
883 static void free_compare(DisasCompare *c)
885 if (!c->g1) {
886 if (c->is_64) {
887 tcg_temp_free_i64(c->u.s64.a);
888 } else {
889 tcg_temp_free_i32(c->u.s32.a);
892 if (!c->g2) {
893 if (c->is_64) {
894 tcg_temp_free_i64(c->u.s64.b);
895 } else {
896 tcg_temp_free_i32(c->u.s32.b);
901 /* ====================================================================== */
902 /* Define the insn format enumeration. */
903 #define F0(N) FMT_##N,
904 #define F1(N, X1) F0(N)
905 #define F2(N, X1, X2) F0(N)
906 #define F3(N, X1, X2, X3) F0(N)
907 #define F4(N, X1, X2, X3, X4) F0(N)
908 #define F5(N, X1, X2, X3, X4, X5) F0(N)
910 typedef enum {
911 #include "insn-format.def"
912 } DisasFormat;
914 #undef F0
915 #undef F1
916 #undef F2
917 #undef F3
918 #undef F4
919 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact field indices: fields that never coexist share a slot. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
981 struct DisasFields {
982 unsigned op:8;
983 unsigned op2:8;
984 unsigned presentC:16;
985 unsigned int presentO;
986 int c[NUM_C_FIELD];
989 /* This is the way fields are to be accessed out of DisasFields. */
990 #define have_field(S, F) have_field1((S), FLD_O_##F)
991 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
993 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
995 return (f->presentO >> c) & 1;
998 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
999 enum DisasFieldIndexC c)
1001 assert(have_field1(f, o));
1002 return f->c[c];
1005 /* Describe the layout of each field in each format. */
1006 typedef struct DisasField {
1007 unsigned int beg:8;
1008 unsigned int size:8;
1009 unsigned int type:2;
1010 unsigned int indexC:6;
1011 enum DisasFieldIndexO indexO:8;
1012 } DisasField;
1014 typedef struct DisasFormatInfo {
1015 DisasField op[NUM_C_FIELD];
1016 } DisasFormatInfo;
1018 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1019 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1020 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1021 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1022 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1023 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1024 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1025 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1027 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1028 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1029 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1030 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1031 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1033 #define F0(N) { { } },
1034 #define F1(N, X1) { { X1 } },
1035 #define F2(N, X1, X2) { { X1, X2 } },
1036 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1037 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1038 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1040 static const DisasFormatInfo format_info[] = {
1041 #include "insn-format.def"
1044 #undef F0
1045 #undef F1
1046 #undef F2
1047 #undef F3
1048 #undef F4
1049 #undef F5
1050 #undef R
1051 #undef M
1052 #undef BD
1053 #undef BXD
1054 #undef BDL
1055 #undef BXDL
1056 #undef I
1057 #undef L
1059 /* Generally, we'll extract operands into this structures, operate upon
1060 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1061 of routines below for more details. */
1062 typedef struct {
1063 bool g_out, g_out2, g_in1, g_in2;
1064 TCGv_i64 out, out2, in1, in2;
1065 TCGv_i64 addr1;
1066 } DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* Architectural facility an instruction belongs to; used to gate
   availability of the insn on the modeled CPU.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception sumilation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1121 struct DisasInsn {
1122 unsigned opc:16;
1123 DisasFormat fmt:8;
1124 DisasFacility fac:8;
1125 unsigned spec:8;
1127 const char *name;
1129 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1130 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1131 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1132 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1133 void (*help_cout)(DisasContext *, DisasOps *);
1134 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1136 uint64_t data;
1139 /* ====================================================================== */
1140 /* Miscellaneous helpers, used by several operations. */
1142 static void help_l2_shift(DisasContext *s, DisasFields *f,
1143 DisasOps *o, int mask)
1145 int b2 = get_field(f, b2);
1146 int d2 = get_field(f, d2);
1148 if (b2 == 0) {
1149 o->in2 = tcg_const_i64(d2 & mask);
1150 } else {
1151 o->in2 = get_address(s, 0, b2, d2);
1152 tcg_gen_andi_i64(o->in2, o->in2, mask);
1156 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1158 if (dest == s->next_pc) {
1159 return NO_EXIT;
1161 if (use_goto_tb(s, dest)) {
1162 update_cc_op(s);
1163 tcg_gen_goto_tb(0);
1164 tcg_gen_movi_i64(psw_addr, dest);
1165 tcg_gen_exit_tb((uintptr_t)s->tb);
1166 return EXIT_GOTO_TB;
1167 } else {
1168 tcg_gen_movi_i64(psw_addr, dest);
1169 return EXIT_PC_UPDATED;
1173 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1174 bool is_imm, int imm, TCGv_i64 cdest)
1176 ExitStatus ret;
1177 uint64_t dest = s->pc + 2 * imm;
1178 int lab;
1180 /* Take care of the special cases first. */
1181 if (c->cond == TCG_COND_NEVER) {
1182 ret = NO_EXIT;
1183 goto egress;
1185 if (is_imm) {
1186 if (dest == s->next_pc) {
1187 /* Branch to next. */
1188 ret = NO_EXIT;
1189 goto egress;
1191 if (c->cond == TCG_COND_ALWAYS) {
1192 ret = help_goto_direct(s, dest);
1193 goto egress;
1195 } else {
1196 if (TCGV_IS_UNUSED_I64(cdest)) {
1197 /* E.g. bcr %r0 -> no branch. */
1198 ret = NO_EXIT;
1199 goto egress;
1201 if (c->cond == TCG_COND_ALWAYS) {
1202 tcg_gen_mov_i64(psw_addr, cdest);
1203 ret = EXIT_PC_UPDATED;
1204 goto egress;
1208 if (use_goto_tb(s, s->next_pc)) {
1209 if (is_imm && use_goto_tb(s, dest)) {
1210 /* Both exits can use goto_tb. */
1211 update_cc_op(s);
1213 lab = gen_new_label();
1214 if (c->is_64) {
1215 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1216 } else {
1217 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1220 /* Branch not taken. */
1221 tcg_gen_goto_tb(0);
1222 tcg_gen_movi_i64(psw_addr, s->next_pc);
1223 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1225 /* Branch taken. */
1226 gen_set_label(lab);
1227 tcg_gen_goto_tb(1);
1228 tcg_gen_movi_i64(psw_addr, dest);
1229 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1231 ret = EXIT_GOTO_TB;
1232 } else {
1233 /* Fallthru can use goto_tb, but taken branch cannot. */
1234 /* Store taken branch destination before the brcond. This
1235 avoids having to allocate a new local temp to hold it.
1236 We'll overwrite this in the not taken case anyway. */
1237 if (!is_imm) {
1238 tcg_gen_mov_i64(psw_addr, cdest);
1241 lab = gen_new_label();
1242 if (c->is_64) {
1243 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1244 } else {
1245 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1248 /* Branch not taken. */
1249 update_cc_op(s);
1250 tcg_gen_goto_tb(0);
1251 tcg_gen_movi_i64(psw_addr, s->next_pc);
1252 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1254 gen_set_label(lab);
1255 if (is_imm) {
1256 tcg_gen_movi_i64(psw_addr, dest);
1258 ret = EXIT_PC_UPDATED;
1260 } else {
1261 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1262 Most commonly we're single-stepping or some other condition that
1263 disables all use of goto_tb. Just update the PC and exit. */
1265 TCGv_i64 next = tcg_const_i64(s->next_pc);
1266 if (is_imm) {
1267 cdest = tcg_const_i64(dest);
1270 if (c->is_64) {
1271 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1272 cdest, next);
1273 } else {
1274 TCGv_i32 t0 = tcg_temp_new_i32();
1275 TCGv_i64 t1 = tcg_temp_new_i64();
1276 TCGv_i64 z = tcg_const_i64(0);
1277 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1278 tcg_gen_extu_i32_i64(t1, t0);
1279 tcg_temp_free_i32(t0);
1280 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1281 tcg_temp_free_i64(t1);
1282 tcg_temp_free_i64(z);
1285 if (is_imm) {
1286 tcg_temp_free_i64(cdest);
1288 tcg_temp_free_i64(next);
1290 ret = EXIT_PC_UPDATED;
1293 egress:
1294 free_compare(c);
1295 return ret;
1298 /* ====================================================================== */
1299 /* The operations. These perform the bulk of the work for any insn,
1300 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE: o->out = |o->in2| (64-bit, via helper).  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
/* LOAD POSITIVE (short BFP): clear bit 31, the float32 sign bit.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
/* LOAD POSITIVE (long BFP): clear bit 63, the float64 sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
/* LOAD POSITIVE (extended BFP): clear the sign bit in the high half,
   pass the low half through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* ADD: out = in1 + in2.  CC computation is handled by the insn table.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD LOGICAL WITH CARRY: out = in1 + in2 + (carry extracted from CC).  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in 32 bits, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* ADD (short BFP): helper computes the sum and the FP flags.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands in out:out2 / in1:in2;
   helper returns the low half via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND IMMEDIATE (NIxx): AND a <size>-bit immediate field positioned at
   <shift> into in1; bits outside the field are preserved by OR-ing the
   complement of the mask into the shifted immediate first.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;   /* bit position of the field */
    int size = s->insn->data >> 8;      /* width of the field in bits */
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* BRANCH AND SAVE / BRANCH AND LINK: store the link information, then
   branch to in2 unless in2 is unused (base register 0 means no branch).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
/* BRANCH RELATIVE AND SAVE: store link info and take the relative branch
   (i2 is a halfword offset from the current instruction).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* BRANCH ON CONDITION: evaluate the m1 mask against the CC and branch to
   either the immediate target or the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
   if the decremented value is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, write back only the low half of r1.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if non-zero.
   The comparison reads the global register directly (g1 = true).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXH/BXLE): add r3 to r1, compare the sum with
   the odd register of the r3 pair, and branch on high (GT) or
   low-or-equal (LE) depending on insn->data.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* Snapshot the comparand before the write-back below, in case r1
       aliases r3|1.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): as op_bx32 but on full registers.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 aliases the comparand register, take a private copy so the
       comparison still sees the pre-update value.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH (CRJ et al.): compare in1 with in2 using the m3
   condition (unsigned variant selected by insn->data) and branch to the
   i4 immediate or to the b4/d4 address.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Register-destination form: compute the branch target address.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* COMPARE (short BFP): helper returns the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): 128-bit operands in out:out2 / in1:in2.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* The following twelve translators all convert BFP values to integers:
   CF* to signed 32-bit, CG* to signed 64-bit, CLF*/CLG* to the unsigned
   variants; the e/d/x infix selects short/long/extended source format.
   m3 carries the rounding mode; the CC is derived from the source value.  */

static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Integer-to-BFP conversions: C*GB from signed 64-bit, C*LGB from
   unsigned 64-bit; e/d/x selects the target format.  m3 is the rounding
   mode; no CC is set.  Extended results come back via return_low128.  */

static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM (CKSM): the helper computes the checksum and the number of
   bytes consumed; the r2 address/length pair is advanced afterwards.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* The checksum itself comes back in the low-128 return slot.  */
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (CLC): for power-of-two lengths up to 8, compare inline
   with a pair of loads; otherwise fall back to the byte-loop helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);   /* length code: bytes - 1 */
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Odd lengths: punt to the helper, which also sets the CC.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED: helper handles the interruptible loop.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the bytes of
   the low word of in1 selected by m3 against storage at in2.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING: regs[0] holds the terminator byte; the helper
   returns the updated first address, the second via return_low128.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* COPY SIGN: out = magnitude of in2 with the sign bit of in1.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* COMPARE AND SWAP (CS/CSG): emulated non-atomically with a load,
   a conditional select and an unconditional store.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (128-bit CDSG): like op_cs but on a register
   pair, again emulated non-atomically.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* Select either the new or the original values for the store-back.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1927 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): handled entirely by helper.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1937 #endif
/* CONVERT TO DECIMAL: helper packs the low word of in1 into decimal,
   result is stored as a doubleword at the in2 address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* COMPARE AND TRAP: raise a data exception when the m3 condition holds;
   insn->data selects the unsigned comparison variants.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    /* Invert the condition: branch AROUND the trap when it does not hold.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap.  */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1978 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged hypercall interface).  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
1992 #endif
/* Integer divides: the helpers produce the remainder in the primary
   return and the quotient via return_low128 (or vice versa per helper
   signature); the register pair is filled from out/out2.  */

static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64 unsigned divide: dividend is the out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (long BFP).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (extended BFP): 128-bit result returned in out:out2.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS REGISTER: out = zero-extended access register r2.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT FPC: out = zero-extended floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXECUTE: run the target instruction with its second byte OR-ed from
   a register, via the slow-path helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* Sync PSW and CC so the helper sees a consistent CPU state.  */
    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 if input is zero),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2100 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2102 int m3 = get_field(s->fields, m3);
2103 int pos, len, base = s->insn->data;
2104 TCGv_i64 tmp = tcg_temp_new_i64();
2105 uint64_t ccm;
2107 switch (m3) {
2108 case 0xf:
2109 /* Effectively a 32-bit load. */
2110 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2111 len = 32;
2112 goto one_insert;
2114 case 0xc:
2115 case 0x6:
2116 case 0x3:
2117 /* Effectively a 16-bit load. */
2118 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2119 len = 16;
2120 goto one_insert;
2122 case 0x8:
2123 case 0x4:
2124 case 0x2:
2125 case 0x1:
2126 /* Effectively an 8-bit load. */
2127 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2128 len = 8;
2129 goto one_insert;
2131 one_insert:
2132 pos = base + ctz32(m3) * 8;
2133 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2134 ccm = ((1ull << len) - 1) << pos;
2135 break;
2137 default:
2138 /* This is going to be a sequence of loads and inserts. */
2139 pos = base + 32 - 8;
2140 ccm = 0;
2141 while (m3) {
2142 if (m3 & 0x8) {
2143 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2144 tcg_gen_addi_i64(o->in2, o->in2, 1);
2145 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2146 ccm |= 0xff << pos;
2148 m3 = (m3 << 1) & 0xf;
2149 pos -= 8;
2151 break;
2154 tcg_gen_movi_i64(tmp, ccm);
2155 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2156 tcg_temp_free_i64(tmp);
2157 return NO_EXIT;
/* Insert-immediate family (IIxx): deposit a <size>-bit field from in2
   into in1 at bit position <shift>, both encoded in insn->data.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK: build bits 24-31 of out from the program mask
   (taken from psw_mask) and the condition code.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear the byte we're about to fill in.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask from the PSW into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Place the condition code into bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2187 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged).  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged).  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2201 #endif
/* BFP format conversions: lengthen (LDEB/LXDB/LXEB) and round/shorten
   (LEDB/LDXB/LEXB); extended results come back via return_low128.  */

static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 0-30 of the input.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
/* Generic memory loads of 8/16/32/64 bits, signed and unsigned, used by
   the various LOAD instructions via the insn table.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* LOAD ON CONDITION: out = (m3 condition holds) ? in2 : in1, realized
   with movcond so no branch is generated.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the flag, widen, then select.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2319 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): load control registers r1..r3.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit, privileged).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS (privileged): translate a virtual address; the
   helper sets the CC according to the translation result.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* LOAD PSW (privileged): load a short-format (2 x 32-bit) PSW from
   storage and install it; ends the TB since control flow changes.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (privileged): 16-byte PSW, mask then address.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2387 #endif
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from storage.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2400 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2402 int r1 = get_field(s->fields, r1);
2403 int r3 = get_field(s->fields, r3);
2404 TCGv_i64 t = tcg_temp_new_i64();
2405 TCGv_i64 t4 = tcg_const_i64(4);
2407 while (1) {
2408 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2409 store_reg32_i64(r1, t);
2410 if (r1 == r3) {
2411 break;
2413 tcg_gen_add_i64(o->in2, o->in2, t4);
2414 r1 = (r1 + 1) & 15;
2417 tcg_temp_free_i64(t);
2418 tcg_temp_free_i64(t4);
2419 return NO_EXIT;
/* LOAD MULTIPLE HIGH: load the high words of registers r1..r3
   (wrapping mod 16) from consecutive words at in2.
   NOTE(review): a page fault partway through this loop leaves the
   earlier registers already overwritten — verify against the PoO's
   access-exception requirements for LMH (LM/LMG have the same issue
   in the sibling translators).  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2444 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2446 int r1 = get_field(s->fields, r1);
2447 int r3 = get_field(s->fields, r3);
2448 TCGv_i64 t8 = tcg_const_i64(8);
2450 while (1) {
2451 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2452 if (r1 == r3) {
2453 break;
2455 tcg_gen_add_i64(o->in2, o->in2, t8);
2456 r1 = (r1 + 1) & 15;
2459 tcg_temp_free_i64(t8);
2460 return NO_EXIT;
2463 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2465 o->out = o->in2;
2466 o->g_out = o->g_in2;
2467 TCGV_UNUSED_I64(o->in2);
2468 o->g_in2 = false;
2469 return NO_EXIT;
2472 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2474 o->out = o->in1;
2475 o->out2 = o->in2;
2476 o->g_out = o->g_in1;
2477 o->g_out2 = o->g_in2;
2478 TCGV_UNUSED_I64(o->in1);
2479 TCGV_UNUSED_I64(o->in2);
2480 o->g_in1 = o->g_in2 = false;
2481 return NO_EXIT;
2484 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2486 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2487 potential_page_fault(s);
2488 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2489 tcg_temp_free_i32(l);
2490 return NO_EXIT;
2493 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2495 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2496 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2497 potential_page_fault(s);
2498 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2499 tcg_temp_free_i32(r1);
2500 tcg_temp_free_i32(r2);
2501 set_cc_static(s);
2502 return NO_EXIT;
2505 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2507 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2508 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2509 potential_page_fault(s);
2510 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2511 tcg_temp_free_i32(r1);
2512 tcg_temp_free_i32(r3);
2513 set_cc_static(s);
2514 return NO_EXIT;
2517 #ifndef CONFIG_USER_ONLY
2518 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2520 int r1 = get_field(s->fields, l1);
2521 check_privileged(s);
2522 potential_page_fault(s);
2523 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2524 set_cc_static(s);
2525 return NO_EXIT;
2528 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2530 int r1 = get_field(s->fields, l1);
2531 check_privileged(s);
2532 potential_page_fault(s);
2533 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2534 set_cc_static(s);
2535 return NO_EXIT;
2537 #endif
2539 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2541 potential_page_fault(s);
2542 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2543 set_cc_static(s);
2544 return NO_EXIT;
2547 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2549 potential_page_fault(s);
2550 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2551 set_cc_static(s);
2552 return_low128(o->in2);
2553 return NO_EXIT;
2556 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2558 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2559 return NO_EXIT;
2562 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2564 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2565 return NO_EXIT;
2568 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2570 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2571 return NO_EXIT;
2574 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2576 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2577 return NO_EXIT;
2580 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2582 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2583 return NO_EXIT;
2586 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2588 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2589 return_low128(o->out2);
2590 return NO_EXIT;
2593 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2595 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2596 return_low128(o->out2);
2597 return NO_EXIT;
2600 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2602 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2603 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2604 tcg_temp_free_i64(r3);
2605 return NO_EXIT;
2608 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2610 int r3 = get_field(s->fields, r3);
2611 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2612 return NO_EXIT;
2615 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2617 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2618 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2619 tcg_temp_free_i64(r3);
2620 return NO_EXIT;
2623 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2625 int r3 = get_field(s->fields, r3);
2626 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2627 return NO_EXIT;
2630 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2632 gen_helper_nabs_i64(o->out, o->in2);
2633 return NO_EXIT;
2636 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2638 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2639 return NO_EXIT;
2642 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2644 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2645 return NO_EXIT;
2648 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2650 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2651 tcg_gen_mov_i64(o->out2, o->in2);
2652 return NO_EXIT;
2655 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2657 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2658 potential_page_fault(s);
2659 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2660 tcg_temp_free_i32(l);
2661 set_cc_static(s);
2662 return NO_EXIT;
2665 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2667 tcg_gen_neg_i64(o->out, o->in2);
2668 return NO_EXIT;
2671 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2673 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2674 return NO_EXIT;
2677 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2679 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2680 return NO_EXIT;
2683 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2685 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2686 tcg_gen_mov_i64(o->out2, o->in2);
2687 return NO_EXIT;
2690 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2692 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2693 potential_page_fault(s);
2694 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2695 tcg_temp_free_i32(l);
2696 set_cc_static(s);
2697 return NO_EXIT;
2700 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2702 tcg_gen_or_i64(o->out, o->in1, o->in2);
2703 return NO_EXIT;
2706 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2708 int shift = s->insn->data & 0xff;
2709 int size = s->insn->data >> 8;
2710 uint64_t mask = ((1ull << size) - 1) << shift;
2712 assert(!o->g_in2);
2713 tcg_gen_shli_i64(o->in2, o->in2, shift);
2714 tcg_gen_or_i64(o->out, o->in1, o->in2);
2716 /* Produce the CC from only the bits manipulated. */
2717 tcg_gen_andi_i64(cc_dst, o->out, mask);
2718 set_cc_nz_u64(s, cc_dst);
2719 return NO_EXIT;
2722 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2724 gen_helper_popcnt(o->out, o->in2);
2725 return NO_EXIT;
2728 #ifndef CONFIG_USER_ONLY
2729 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2731 check_privileged(s);
2732 gen_helper_ptlb(cpu_env);
2733 return NO_EXIT;
2735 #endif
2737 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2739 int i3 = get_field(s->fields, i3);
2740 int i4 = get_field(s->fields, i4);
2741 int i5 = get_field(s->fields, i5);
2742 int do_zero = i4 & 0x80;
2743 uint64_t mask, imask, pmask;
2744 int pos, len, rot;
2746 /* Adjust the arguments for the specific insn. */
2747 switch (s->fields->op2) {
2748 case 0x55: /* risbg */
2749 i3 &= 63;
2750 i4 &= 63;
2751 pmask = ~0;
2752 break;
2753 case 0x5d: /* risbhg */
2754 i3 &= 31;
2755 i4 &= 31;
2756 pmask = 0xffffffff00000000ull;
2757 break;
2758 case 0x51: /* risblg */
2759 i3 &= 31;
2760 i4 &= 31;
2761 pmask = 0x00000000ffffffffull;
2762 break;
2763 default:
2764 abort();
2767 /* MASK is the set of bits to be inserted from R2.
2768 Take care for I3/I4 wraparound. */
2769 mask = pmask >> i3;
2770 if (i3 <= i4) {
2771 mask ^= pmask >> i4 >> 1;
2772 } else {
2773 mask |= ~(pmask >> i4 >> 1);
2775 mask &= pmask;
2777 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2778 insns, we need to keep the other half of the register. */
2779 imask = ~mask | ~pmask;
2780 if (do_zero) {
2781 if (s->fields->op2 == 0x55) {
2782 imask = 0;
2783 } else {
2784 imask = ~pmask;
2788 /* In some cases we can implement this with deposit, which can be more
2789 efficient on some hosts. */
2790 if (~mask == imask && i3 <= i4) {
2791 if (s->fields->op2 == 0x5d) {
2792 i3 += 32, i4 += 32;
2794 /* Note that we rotate the bits to be inserted to the lsb, not to
2795 the position as described in the PoO. */
2796 len = i4 - i3 + 1;
2797 pos = 63 - i4;
2798 rot = (i5 - pos) & 63;
2799 } else {
2800 pos = len = -1;
2801 rot = i5 & 63;
2804 /* Rotate the input as necessary. */
2805 tcg_gen_rotli_i64(o->in2, o->in2, rot);
2807 /* Insert the selected bits into the output. */
2808 if (pos >= 0) {
2809 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2810 } else if (imask == 0) {
2811 tcg_gen_andi_i64(o->out, o->in2, mask);
2812 } else {
2813 tcg_gen_andi_i64(o->in2, o->in2, mask);
2814 tcg_gen_andi_i64(o->out, o->out, imask);
2815 tcg_gen_or_i64(o->out, o->out, o->in2);
2817 return NO_EXIT;
2820 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2822 int i3 = get_field(s->fields, i3);
2823 int i4 = get_field(s->fields, i4);
2824 int i5 = get_field(s->fields, i5);
2825 uint64_t mask;
2827 /* If this is a test-only form, arrange to discard the result. */
2828 if (i3 & 0x80) {
2829 o->out = tcg_temp_new_i64();
2830 o->g_out = false;
2833 i3 &= 63;
2834 i4 &= 63;
2835 i5 &= 63;
2837 /* MASK is the set of bits to be operated on from R2.
2838 Take care for I3/I4 wraparound. */
2839 mask = ~0ull >> i3;
2840 if (i3 <= i4) {
2841 mask ^= ~0ull >> i4 >> 1;
2842 } else {
2843 mask |= ~(~0ull >> i4 >> 1);
2846 /* Rotate the input as necessary. */
2847 tcg_gen_rotli_i64(o->in2, o->in2, i5);
2849 /* Operate. */
2850 switch (s->fields->op2) {
2851 case 0x55: /* AND */
2852 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2853 tcg_gen_and_i64(o->out, o->out, o->in2);
2854 break;
2855 case 0x56: /* OR */
2856 tcg_gen_andi_i64(o->in2, o->in2, mask);
2857 tcg_gen_or_i64(o->out, o->out, o->in2);
2858 break;
2859 case 0x57: /* XOR */
2860 tcg_gen_andi_i64(o->in2, o->in2, mask);
2861 tcg_gen_xor_i64(o->out, o->out, o->in2);
2862 break;
2863 default:
2864 abort();
2867 /* Set the CC. */
2868 tcg_gen_andi_i64(cc_dst, o->out, mask);
2869 set_cc_nz_u64(s, cc_dst);
2870 return NO_EXIT;
2873 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2875 tcg_gen_bswap16_i64(o->out, o->in2);
2876 return NO_EXIT;
2879 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2881 tcg_gen_bswap32_i64(o->out, o->in2);
2882 return NO_EXIT;
2885 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2887 tcg_gen_bswap64_i64(o->out, o->in2);
2888 return NO_EXIT;
2891 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2893 TCGv_i32 t1 = tcg_temp_new_i32();
2894 TCGv_i32 t2 = tcg_temp_new_i32();
2895 TCGv_i32 to = tcg_temp_new_i32();
2896 tcg_gen_trunc_i64_i32(t1, o->in1);
2897 tcg_gen_trunc_i64_i32(t2, o->in2);
2898 tcg_gen_rotl_i32(to, t1, t2);
2899 tcg_gen_extu_i32_i64(o->out, to);
2900 tcg_temp_free_i32(t1);
2901 tcg_temp_free_i32(t2);
2902 tcg_temp_free_i32(to);
2903 return NO_EXIT;
2906 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2908 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2909 return NO_EXIT;
2912 #ifndef CONFIG_USER_ONLY
2913 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2915 check_privileged(s);
2916 gen_helper_rrbe(cc_op, cpu_env, o->in2);
2917 set_cc_static(s);
2918 return NO_EXIT;
2921 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2923 check_privileged(s);
2924 gen_helper_sacf(cpu_env, o->in2);
2925 /* Addressing mode has changed, so end the block. */
2926 return EXIT_PC_STALE;
2928 #endif
2930 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2932 int r1 = get_field(s->fields, r1);
2933 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
2934 return NO_EXIT;
2937 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2939 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2940 return NO_EXIT;
2943 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2945 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2946 return NO_EXIT;
2949 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2951 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2952 return_low128(o->out2);
2953 return NO_EXIT;
2956 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2958 gen_helper_sqeb(o->out, cpu_env, o->in2);
2959 return NO_EXIT;
2962 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2964 gen_helper_sqdb(o->out, cpu_env, o->in2);
2965 return NO_EXIT;
2968 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2970 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2971 return_low128(o->out2);
2972 return NO_EXIT;
2975 #ifndef CONFIG_USER_ONLY
2976 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2978 check_privileged(s);
2979 potential_page_fault(s);
2980 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
2981 set_cc_static(s);
2982 return NO_EXIT;
2985 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2987 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2988 check_privileged(s);
2989 potential_page_fault(s);
2990 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2991 tcg_temp_free_i32(r1);
2992 return NO_EXIT;
2994 #endif
2996 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
2998 DisasCompare c;
2999 TCGv_i64 a;
3000 int lab, r1;
3002 disas_jcc(s, &c, get_field(s->fields, m3));
3004 lab = gen_new_label();
3005 if (c.is_64) {
3006 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3007 } else {
3008 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3010 free_compare(&c);
3012 r1 = get_field(s->fields, r1);
3013 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3014 if (s->insn->data) {
3015 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3016 } else {
3017 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3019 tcg_temp_free_i64(a);
3021 gen_set_label(lab);
3022 return NO_EXIT;
3025 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3027 uint64_t sign = 1ull << s->insn->data;
3028 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3029 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3030 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3031 /* The arithmetic left shift is curious in that it does not affect
3032 the sign bit. Copy that over from the source unchanged. */
3033 tcg_gen_andi_i64(o->out, o->out, ~sign);
3034 tcg_gen_andi_i64(o->in1, o->in1, sign);
3035 tcg_gen_or_i64(o->out, o->out, o->in1);
3036 return NO_EXIT;
3039 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3041 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3042 return NO_EXIT;
3045 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3047 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3048 return NO_EXIT;
3051 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3053 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3054 return NO_EXIT;
3057 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3059 gen_helper_sfpc(cpu_env, o->in2);
3060 return NO_EXIT;
3063 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3065 gen_helper_sfas(cpu_env, o->in2);
3066 return NO_EXIT;
3069 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3071 int b2 = get_field(s->fields, b2);
3072 int d2 = get_field(s->fields, d2);
3073 TCGv_i64 t1 = tcg_temp_new_i64();
3074 TCGv_i64 t2 = tcg_temp_new_i64();
3075 int mask, pos, len;
3077 switch (s->fields->op2) {
3078 case 0x99: /* SRNM */
3079 pos = 0, len = 2;
3080 break;
3081 case 0xb8: /* SRNMB */
3082 pos = 0, len = 3;
3083 break;
3084 case 0xb9: /* SRNMT */
3085 pos = 4, len = 3;
3086 break;
3087 default:
3088 tcg_abort();
3090 mask = (1 << len) - 1;
3092 /* Insert the value into the appropriate field of the FPC. */
3093 if (b2 == 0) {
3094 tcg_gen_movi_i64(t1, d2 & mask);
3095 } else {
3096 tcg_gen_addi_i64(t1, regs[b2], d2);
3097 tcg_gen_andi_i64(t1, t1, mask);
3099 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3100 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3101 tcg_temp_free_i64(t1);
3103 /* Then install the new FPC to set the rounding mode in fpu_status. */
3104 gen_helper_sfpc(cpu_env, t2);
3105 tcg_temp_free_i64(t2);
3106 return NO_EXIT;
3109 #ifndef CONFIG_USER_ONLY
3110 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3112 check_privileged(s);
3113 tcg_gen_shri_i64(o->in2, o->in2, 4);
3114 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3115 return NO_EXIT;
3118 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3120 check_privileged(s);
3121 gen_helper_sske(cpu_env, o->in1, o->in2);
3122 return NO_EXIT;
3125 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3127 check_privileged(s);
3128 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3129 return NO_EXIT;
3132 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3134 check_privileged(s);
3135 /* ??? Surely cpu address != cpu number. In any case the previous
3136 version of this stored more than the required half-word, so it
3137 is unlikely this has ever been tested. */
3138 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3139 return NO_EXIT;
3142 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3144 gen_helper_stck(o->out, cpu_env);
3145 /* ??? We don't implement clock states. */
3146 gen_op_movi_cc(s, 0);
3147 return NO_EXIT;
3150 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3152 TCGv_i64 c1 = tcg_temp_new_i64();
3153 TCGv_i64 c2 = tcg_temp_new_i64();
3154 gen_helper_stck(c1, cpu_env);
3155 /* Shift the 64-bit value into its place as a zero-extended
3156 104-bit value. Note that "bit positions 64-103 are always
3157 non-zero so that they compare differently to STCK"; we set
3158 the least significant bit to 1. */
3159 tcg_gen_shli_i64(c2, c1, 56);
3160 tcg_gen_shri_i64(c1, c1, 8);
3161 tcg_gen_ori_i64(c2, c2, 0x10000);
3162 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3163 tcg_gen_addi_i64(o->in2, o->in2, 8);
3164 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3165 tcg_temp_free_i64(c1);
3166 tcg_temp_free_i64(c2);
3167 /* ??? We don't implement clock states. */
3168 gen_op_movi_cc(s, 0);
3169 return NO_EXIT;
3172 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3174 check_privileged(s);
3175 gen_helper_sckc(cpu_env, o->in2);
3176 return NO_EXIT;
3179 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3181 check_privileged(s);
3182 gen_helper_stckc(o->out, cpu_env);
3183 return NO_EXIT;
3186 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3188 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3189 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3190 check_privileged(s);
3191 potential_page_fault(s);
3192 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3193 tcg_temp_free_i32(r1);
3194 tcg_temp_free_i32(r3);
3195 return NO_EXIT;
3198 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3200 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3201 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3202 check_privileged(s);
3203 potential_page_fault(s);
3204 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3205 tcg_temp_free_i32(r1);
3206 tcg_temp_free_i32(r3);
3207 return NO_EXIT;
3210 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3212 check_privileged(s);
3213 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3214 return NO_EXIT;
3217 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3219 check_privileged(s);
3220 gen_helper_spt(cpu_env, o->in2);
3221 return NO_EXIT;
3224 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3226 TCGv_i64 f, a;
3227 /* We really ought to have more complete indication of facilities
3228 that we implement. Address this when STFLE is implemented. */
3229 check_privileged(s);
3230 f = tcg_const_i64(0xc0000000);
3231 a = tcg_const_i64(200);
3232 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3233 tcg_temp_free_i64(f);
3234 tcg_temp_free_i64(a);
3235 return NO_EXIT;
3238 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3240 check_privileged(s);
3241 gen_helper_stpt(o->out, cpu_env);
3242 return NO_EXIT;
3245 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3247 check_privileged(s);
3248 potential_page_fault(s);
3249 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3250 set_cc_static(s);
3251 return NO_EXIT;
3254 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3256 check_privileged(s);
3257 gen_helper_spx(cpu_env, o->in2);
3258 return NO_EXIT;
3261 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3263 check_privileged(s);
3264 /* Not operational. */
3265 gen_op_movi_cc(s, 3);
3266 return NO_EXIT;
3269 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3271 check_privileged(s);
3272 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3273 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3274 return NO_EXIT;
3277 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3279 uint64_t i2 = get_field(s->fields, i2);
3280 TCGv_i64 t;
3282 check_privileged(s);
3284 /* It is important to do what the instruction name says: STORE THEN.
3285 If we let the output hook perform the store then if we fault and
3286 restart, we'll have the wrong SYSTEM MASK in place. */
3287 t = tcg_temp_new_i64();
3288 tcg_gen_shri_i64(t, psw_mask, 56);
3289 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3290 tcg_temp_free_i64(t);
3292 if (s->fields->op == 0xac) {
3293 tcg_gen_andi_i64(psw_mask, psw_mask,
3294 (i2 << 56) | 0x00ffffffffffffffull);
3295 } else {
3296 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3298 return NO_EXIT;
3301 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3303 check_privileged(s);
3304 potential_page_fault(s);
3305 gen_helper_stura(cpu_env, o->in2, o->in1);
3306 return NO_EXIT;
3308 #endif
3310 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3312 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3313 return NO_EXIT;
3316 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3318 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3319 return NO_EXIT;
3322 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3324 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3325 return NO_EXIT;
3328 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3330 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3331 return NO_EXIT;
3334 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3336 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3337 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3338 potential_page_fault(s);
3339 gen_helper_stam(cpu_env, r1, o->in2, r3);
3340 tcg_temp_free_i32(r1);
3341 tcg_temp_free_i32(r3);
3342 return NO_EXIT;
3345 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3347 int m3 = get_field(s->fields, m3);
3348 int pos, base = s->insn->data;
3349 TCGv_i64 tmp = tcg_temp_new_i64();
3351 pos = base + ctz32(m3) * 8;
3352 switch (m3) {
3353 case 0xf:
3354 /* Effectively a 32-bit store. */
3355 tcg_gen_shri_i64(tmp, o->in1, pos);
3356 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3357 break;
3359 case 0xc:
3360 case 0x6:
3361 case 0x3:
3362 /* Effectively a 16-bit store. */
3363 tcg_gen_shri_i64(tmp, o->in1, pos);
3364 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3365 break;
3367 case 0x8:
3368 case 0x4:
3369 case 0x2:
3370 case 0x1:
3371 /* Effectively an 8-bit store. */
3372 tcg_gen_shri_i64(tmp, o->in1, pos);
3373 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3374 break;
3376 default:
3377 /* This is going to be a sequence of shifts and stores. */
3378 pos = base + 32 - 8;
3379 while (m3) {
3380 if (m3 & 0x8) {
3381 tcg_gen_shri_i64(tmp, o->in1, pos);
3382 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3383 tcg_gen_addi_i64(o->in2, o->in2, 1);
3385 m3 = (m3 << 1) & 0xf;
3386 pos -= 8;
3388 break;
3390 tcg_temp_free_i64(tmp);
3391 return NO_EXIT;
3394 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3396 int r1 = get_field(s->fields, r1);
3397 int r3 = get_field(s->fields, r3);
3398 int size = s->insn->data;
3399 TCGv_i64 tsize = tcg_const_i64(size);
3401 while (1) {
3402 if (size == 8) {
3403 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3404 } else {
3405 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3407 if (r1 == r3) {
3408 break;
3410 tcg_gen_add_i64(o->in2, o->in2, tsize);
3411 r1 = (r1 + 1) & 15;
3414 tcg_temp_free_i64(tsize);
3415 return NO_EXIT;
3418 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3420 int r1 = get_field(s->fields, r1);
3421 int r3 = get_field(s->fields, r3);
3422 TCGv_i64 t = tcg_temp_new_i64();
3423 TCGv_i64 t4 = tcg_const_i64(4);
3424 TCGv_i64 t32 = tcg_const_i64(32);
3426 while (1) {
3427 tcg_gen_shl_i64(t, regs[r1], t32);
3428 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3429 if (r1 == r3) {
3430 break;
3432 tcg_gen_add_i64(o->in2, o->in2, t4);
3433 r1 = (r1 + 1) & 15;
3436 tcg_temp_free_i64(t);
3437 tcg_temp_free_i64(t4);
3438 tcg_temp_free_i64(t32);
3439 return NO_EXIT;
3442 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3444 potential_page_fault(s);
3445 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3446 set_cc_static(s);
3447 return_low128(o->in2);
3448 return NO_EXIT;
3451 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3453 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3454 return NO_EXIT;
3457 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3459 DisasCompare cmp;
3460 TCGv_i64 borrow;
3462 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3464 /* The !borrow flag is the msb of CC. Since we want the inverse of
3465 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3466 disas_jcc(s, &cmp, 8 | 4);
3467 borrow = tcg_temp_new_i64();
3468 if (cmp.is_64) {
3469 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3470 } else {
3471 TCGv_i32 t = tcg_temp_new_i32();
3472 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3473 tcg_gen_extu_i32_i64(borrow, t);
3474 tcg_temp_free_i32(t);
3476 free_compare(&cmp);
3478 tcg_gen_sub_i64(o->out, o->out, borrow);
3479 tcg_temp_free_i64(borrow);
3480 return NO_EXIT;
3483 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3485 TCGv_i32 t;
3487 update_psw_addr(s);
3488 update_cc_op(s);
3490 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3491 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3492 tcg_temp_free_i32(t);
3494 t = tcg_const_i32(s->next_pc - s->pc);
3495 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3496 tcg_temp_free_i32(t);
3498 gen_exception(EXCP_SVC);
3499 return EXIT_NORETURN;
3502 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3504 gen_helper_tceb(cc_op, o->in1, o->in2);
3505 set_cc_static(s);
3506 return NO_EXIT;
3509 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3511 gen_helper_tcdb(cc_op, o->in1, o->in2);
3512 set_cc_static(s);
3513 return NO_EXIT;
3516 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3518 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3519 set_cc_static(s);
3520 return NO_EXIT;
3523 #ifndef CONFIG_USER_ONLY
3524 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3526 potential_page_fault(s);
3527 gen_helper_tprot(cc_op, o->addr1, o->in2);
3528 set_cc_static(s);
3529 return NO_EXIT;
3531 #endif
3533 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3535 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3536 potential_page_fault(s);
3537 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3538 tcg_temp_free_i32(l);
3539 set_cc_static(s);
3540 return NO_EXIT;
3543 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3545 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3546 potential_page_fault(s);
3547 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3548 tcg_temp_free_i32(l);
3549 return NO_EXIT;
3552 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3554 int d1 = get_field(s->fields, d1);
3555 int d2 = get_field(s->fields, d2);
3556 int b1 = get_field(s->fields, b1);
3557 int b2 = get_field(s->fields, b2);
3558 int l = get_field(s->fields, l1);
3559 TCGv_i32 t32;
3561 o->addr1 = get_address(s, 0, b1, d1);
3563 /* If the addresses are identical, this is a store/memset of zero. */
3564 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3565 o->in2 = tcg_const_i64(0);
3567 l++;
3568 while (l >= 8) {
3569 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3570 l -= 8;
3571 if (l > 0) {
3572 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3575 if (l >= 4) {
3576 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3577 l -= 4;
3578 if (l > 0) {
3579 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3582 if (l >= 2) {
3583 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3584 l -= 2;
3585 if (l > 0) {
3586 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3589 if (l) {
3590 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
3592 gen_op_movi_cc(s, 0);
3593 return NO_EXIT;
3596 /* But in general we'll defer to a helper. */
3597 o->in2 = get_address(s, 0, b2, d2);
3598 t32 = tcg_const_i32(l);
3599 potential_page_fault(s);
3600 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3601 tcg_temp_free_i32(t32);
3602 set_cc_static(s);
3603 return NO_EXIT;
3606 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3608 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3609 return NO_EXIT;
3612 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3614 int shift = s->insn->data & 0xff;
3615 int size = s->insn->data >> 8;
3616 uint64_t mask = ((1ull << size) - 1) << shift;
3618 assert(!o->g_in2);
3619 tcg_gen_shli_i64(o->in2, o->in2, shift);
3620 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3622 /* Produce the CC from only the bits manipulated. */
3623 tcg_gen_andi_i64(cc_dst, o->out, mask);
3624 set_cc_nz_u64(s, cc_dst);
3625 return NO_EXIT;
3628 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3630 o->out = tcg_const_i64(0);
3631 return NO_EXIT;
3634 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3636 o->out = tcg_const_i64(0);
3637 o->out2 = o->out;
3638 o->g_out2 = true;
3639 return NO_EXIT;
3642 /* ====================================================================== */
3643 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3644 the original inputs), update the various cc data structures in order to
3645 be able to compute the new condition code. */
/* Absolute-value results.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed addition: record both inputs and the result so the CC can be
   computed lazily later.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* Logical (unsigned) addition.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Add logical with carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed comparison of in1 against in2.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* Logical (unsigned) comparison.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Floating point results, 32/64/128-bit widths.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* Negative absolute value.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* Complement (negation).  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Nonzero tests.  The 32-bit form masks to the low 32 bits first,
   using the cc_dst global as scratch.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Sign of the result, compared against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Subtraction families: signed, logical, logical-with-borrow.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* Test under mask.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3803 /* ====================================================================== */
3804 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3805 with the TCG register to which we will write. Used in combination with
3806 the "wout" generators, in some cases we need a new temporary, and in
3807 some cases we can write to a TCG global. */
/* Allocate a fresh temporary to hold the output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Fresh temporaries for both halves of a paired output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into general register R1.  g_out marks the TCG global
   so translate_one does not free it.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair R1:R1+1; the SPEC
   flag enforces that R1 is even.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into floating point register R1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly into the 128-bit float register pair R1:R1+2; the
   SPEC flag enforces a valid f128 register number.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3854 /* ====================================================================== */
3855 /* The "Write OUTput" generators. These generally perform some non-trivial
3856 copy of data to TCG globals, or to main memory. The trivial cases are
3857 generally handled by having a "prep" generator install the TCG global
3858 as the destination of the operation. */
/* Store the full 64-bit result into general register R1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into R1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into R1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into R1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store a pair of 32-bit results into the even/odd pair R1:R1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit result across the even/odd pair: low half to R1+1,
   high half to R1.  Note this clobbers o->out with the shifted value,
   which is fine because wout runs after the op is finished with it.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a 32-bit float result into float register R1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a 64-bit float result into float register R1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
3915 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3917 int f1 = get_field(s->fields, r1);
3918 store_freg(f1, o->out);
3919 store_freg(f1 + 2, o->out2);
3921 #define SPEC_wout_x1 SPEC_r1_f128
/* Store the 32-bit result into R1, but only when R1 and R2 differ
   (when they coincide the register already holds the value).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Same idea for a 32-bit float result.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the result to memory at the first-operand address (addr1),
   in 8/16/32/64-bit widths.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store 32 bits to memory at the address held in in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3969 /* ====================================================================== */
3970 /* The "INput 1" generators. These load the first operand to an insn. */
/* Copy general register R1 into a fresh temp.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Alias the R1 global directly ("_o" = original, no copy); g_in1
   prevents translate_one from freeing the global.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Low 32 bits of R1, sign-extended.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Low 32 bits of R1, zero-extended.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of R1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the even/odd pair R1:R1+1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit doubleword assembled from the pair: R1 supplies the high
   half, R1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* Same forms keyed off the R2 and R3 fields.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* 32-bit float operand from register R1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* 64-bit float register R1, aliased directly.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* NOTE(review): unlike the other in1 helpers this fills o->out/o->out2
   (mirroring prep_x1) rather than o->in1 — presumably so the f128
   first operand is also the in-place destination; confirm against its
   users in insn-data.def before changing.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

/* 64-bit float register R3, aliased directly.  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Compute the first-operand effective address (b1+d1) into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Effective address from the second-operand fields (x2+b2+d2), still
   placed in addr1; x2 is zero for formats without an index field.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory operands: compute addr1, then load 8/16/32/64 bits with the
   indicated extension into a fresh temp.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4165 /* ====================================================================== */
4166 /* The "INput 2" generators. These load the second operand to an insn. */
/* Alias general register R1 directly as the second operand.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Low 16 bits of R1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Low 32 bits of R1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Doubleword from the even/odd pair R1:R1+1 (R1 = high half).  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

/* Copy of general register R2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Alias general register R2 directly.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0
4210 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4212 int r2 = get_field(f, r2);
4213 if (r2 != 0) {
4214 o->in2 = load_reg(r2);
4217 #define SPEC_in2_r2_nz 0
/* Low 8/16 bits of R2, sign- or zero-extended as named.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Copy of general register R3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* Low 32 bits of R2, sign- or zero-extended.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* 32-bit float operand from register R2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* 64-bit float register R2, aliased directly.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit float operand: fills BOTH in1 and in2 with the two halves
   of the pair R2:R2+2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Register-indirect address: R2 treated as a base with no index or
   displacement.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0
4295 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4297 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4298 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4300 #define SPEC_in2_a2 0
/* PC-relative address: I2 is a signed halfword offset from this
   insn's address.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to the type width (31 for 32-bit, 63 for
   64-bit shifts) by help_l2_shift.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Memory operands at the a2 effective address; the address temp in
   in2 is overwritten in place with the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Memory operands at the PC-relative (ri2) address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate operands: raw, or truncated to 8/16/32 unsigned bits.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Immediates shifted left by a per-insn amount (insn->data).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4428 /* ====================================================================== */
4430 /* Find opc within the table of insns. This is formulated as a switch
4431 statement so that (1) we get compile-time notice of cut-paste errors
4432 for duplicated opcodes, and (2) the compiler generates the binary
4433 search tree, rather than us having to post-process the table. */
/* C is a D entry with no insn-specific data.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: enumerate the insns, giving each
   an index into insn_info[].  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
/* Second expansion: build the DisasInsn descriptor for each insn,
   wiring up the generator callbacks and the spec-exception mask.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0   0
#define SPEC_in2_0   0
#define SPEC_prep_0  0
#define SPEC_wout_0  0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
/* Third expansion: a switch over opcodes, so that (1) we get
   compile-time notice of cut-and-paste duplicates and (2) the compiler
   generates the binary search tree for us.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation. */
/* Extract one operand field described by F from INSN into O.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size zero marks an unused field slot in the format table.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        /* xor/subtract trick: sign-extend from bit (size-1).  */
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4533 /* Lookup the insn at the current PC, extracting the operands into O and
4534 returning the info struct for the insn. Returns NULL for invalid insn. */
/* Fetch and decode the insn at s->pc: determine its length, left-align
   the raw bits, locate the secondary opcode, look up the descriptor,
   and extract the operand fields into F.  Returns NULL for an invalid
   or unknown opcode.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first opcode byte determines the total insn length (2/4/6).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the insn in the uint64_t so that field positions match
       the big-bit-endian numbering used by the format tables.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Translate the single insn at s->pc: decode it, validate specification
   constraints, run the generator callbacks in order (in1, in2, prep,
   op, wout, cout), free the temporaries, and advance the PC.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Even register-pair operands must name an even register.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit float operands must name a valid register pair.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers; g_* flags mark TCG
       globals that must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
/* Translate a basic block starting at tb->pc into TCG ops.  When
   SEARCH_PC is set, also record per-op PC/cc_op bookkeeping so that
   guest state can be reconstructed from a host PC.  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    do {
        /* Record the mapping from TCG op index to guest PC/cc state.  */
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this PC ends the TB before translating it.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad out the remainder of the instr_start array.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
/* Normal translation entry point.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}

/* Translation with per-op PC bookkeeping (search_pc) enabled.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
4878 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4880 int cc_op;
4881 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4882 cc_op = gen_opc_cc_op[pc_pos];
4883 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4884 env->cc_op = cc_op;