hw/arm/virt: formatting: memory map
[qemu/ar7.git] / target-s390x / translate.c
blobe2a1d05f153d2c6a8034d7e2b43d3414f7e659fb
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
/* TCG handle for the CPU environment pointer; initialized in
   s390x_translate_init() and used as the env argument of all helpers.  */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
46 /* Information that (most) every instruction needs to manipulate. */
/* Forward typedefs so the three structs can reference each other.  */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
60 /* Information carried about a condition to be evaluated. */
61 typedef struct {
62 TCGCond cond:8;
63 bool is_64;
64 bool g1;
65 bool g2;
66 union {
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
69 } u;
70 } DisasCompare;
/* TB termination status: translation ended because an exception was raised. */
72 #define DISAS_EXCP 4

74 #ifdef DEBUG_INLINE_BRANCHES
/* Per-cc-op counters: branches translated inline vs. via the cc helper.  */
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
86 return pc;
89 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
92 S390CPU *cpu = S390_CPU(cs);
93 CPUS390XState *env = &cpu->env;
94 int i;
96 if (env->cc_op > 3) {
97 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
98 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
99 } else {
100 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
101 env->psw.mask, env->psw.addr, env->cc_op);
104 for (i = 0; i < 16; i++) {
105 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
106 if ((i % 4) == 3) {
107 cpu_fprintf(f, "\n");
108 } else {
109 cpu_fprintf(f, " ");
113 for (i = 0; i < 16; i++) {
114 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
115 if ((i % 4) == 3) {
116 cpu_fprintf(f, "\n");
117 } else {
118 cpu_fprintf(f, " ");
122 #ifndef CONFIG_USER_ONLY
123 for (i = 0; i < 16; i++) {
124 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
125 if ((i % 4) == 3) {
126 cpu_fprintf(f, "\n");
127 } else {
128 cpu_fprintf(f, " ");
131 #endif
133 #ifdef DEBUG_INLINE_BRANCHES
134 for (i = 0; i < CC_OP_MAX; i++) {
135 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
136 inline_branch_miss[i], inline_branch_hit[i]);
138 #endif
140 cpu_fprintf(f, "\n");
/* TCG globals mirroring CPUS390XState fields; created in
   s390x_translate_init().  */
143 static TCGv_i64 psw_addr;
144 static TCGv_i64 psw_mask;

/* Condition-code computation inputs (see gen_op_calc_cc).  */
146 static TCGv_i32 cc_op;
147 static TCGv_i64 cc_src;
148 static TCGv_i64 cc_dst;
149 static TCGv_i64 cc_vr;

/* Backing storage for register names: 16 "rN" then 16 "fN" strings.  */
151 static char cpu_reg_names[32][4];
152 static TCGv_i64 regs[16];
153 static TCGv_i64 fregs[16];

/* Saved cc_op per opcode-buffer slot, for restoring state at a given PC.  */
155 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
157 void s390x_translate_init(void)
159 int i;
161 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
162 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
163 offsetof(CPUS390XState, psw.addr),
164 "psw_addr");
165 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
166 offsetof(CPUS390XState, psw.mask),
167 "psw_mask");
169 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
170 "cc_op");
171 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
172 "cc_src");
173 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
174 "cc_dst");
175 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
176 "cc_vr");
178 for (i = 0; i < 16; i++) {
179 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
180 regs[i] = tcg_global_mem_new(TCG_AREG0,
181 offsetof(CPUS390XState, regs[i]),
182 cpu_reg_names[i]);
185 for (i = 0; i < 16; i++) {
186 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
187 fregs[i] = tcg_global_mem_new(TCG_AREG0,
188 offsetof(CPUS390XState, fregs[i].d),
189 cpu_reg_names[i + 16]);
193 static TCGv_i64 load_reg(int reg)
195 TCGv_i64 r = tcg_temp_new_i64();
196 tcg_gen_mov_i64(r, regs[reg]);
197 return r;
200 static TCGv_i64 load_freg32_i64(int reg)
202 TCGv_i64 r = tcg_temp_new_i64();
203 tcg_gen_shri_i64(r, fregs[reg], 32);
204 return r;
207 static void store_reg(int reg, TCGv_i64 v)
209 tcg_gen_mov_i64(regs[reg], v);
212 static void store_freg(int reg, TCGv_i64 v)
214 tcg_gen_mov_i64(fregs[reg], v);
217 static void store_reg32_i64(int reg, TCGv_i64 v)
219 /* 32 bit register writes keep the upper half */
220 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
223 static void store_reg32h_i64(int reg, TCGv_i64 v)
225 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
228 static void store_freg32_i64(int reg, TCGv_i64 v)
230 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
233 static void return_low128(TCGv_i64 dest)
235 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
238 static void update_psw_addr(DisasContext *s)
240 /* psw.addr */
241 tcg_gen_movi_i64(psw_addr, s->pc);
244 static void update_cc_op(DisasContext *s)
246 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
247 tcg_gen_movi_i32(cc_op, s->cc_op);
251 static void potential_page_fault(DisasContext *s)
253 update_psw_addr(s);
254 update_cc_op(s);
257 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
259 return (uint64_t)cpu_lduw_code(env, pc);
262 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
264 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
267 static int get_mem_index(DisasContext *s)
269 switch (s->tb->flags & FLAG_MASK_ASC) {
270 case PSW_ASC_PRIMARY >> 32:
271 return 0;
272 case PSW_ASC_SECONDARY >> 32:
273 return 1;
274 case PSW_ASC_HOME >> 32:
275 return 2;
276 default:
277 tcg_abort();
278 break;
282 static void gen_exception(int excp)
284 TCGv_i32 tmp = tcg_const_i32(excp);
285 gen_helper_exception(cpu_env, tmp);
286 tcg_temp_free_i32(tmp);
289 static void gen_program_exception(DisasContext *s, int code)
291 TCGv_i32 tmp;
293 /* Remember what pgm exeption this was. */
294 tmp = tcg_const_i32(code);
295 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
296 tcg_temp_free_i32(tmp);
298 tmp = tcg_const_i32(s->next_pc - s->pc);
299 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
300 tcg_temp_free_i32(tmp);
302 /* Advance past instruction. */
303 s->pc = s->next_pc;
304 update_psw_addr(s);
306 /* Save off cc. */
307 update_cc_op(s);
309 /* Trigger exception. */
310 gen_exception(EXCP_PGM);
313 static inline void gen_illegal_opcode(DisasContext *s)
315 gen_program_exception(s, PGM_SPECIFICATION);
318 static inline void check_privileged(DisasContext *s)
320 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
321 gen_program_exception(s, PGM_PRIVILEGED);
325 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
327 TCGv_i64 tmp = tcg_temp_new_i64();
328 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
330 /* Note that d2 is limited to 20 bits, signed. If we crop negative
331 displacements early we create larger immedate addends. */
333 /* Note that addi optimizes the imm==0 case. */
334 if (b2 && x2) {
335 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
336 tcg_gen_addi_i64(tmp, tmp, d2);
337 } else if (b2) {
338 tcg_gen_addi_i64(tmp, regs[b2], d2);
339 } else if (x2) {
340 tcg_gen_addi_i64(tmp, regs[x2], d2);
341 } else {
342 if (need_31) {
343 d2 &= 0x7fffffff;
344 need_31 = false;
346 tcg_gen_movi_i64(tmp, d2);
348 if (need_31) {
349 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
352 return tmp;
355 static inline bool live_cc_data(DisasContext *s)
357 return (s->cc_op != CC_OP_DYNAMIC
358 && s->cc_op != CC_OP_STATIC
359 && s->cc_op > 3);
362 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
364 if (live_cc_data(s)) {
365 tcg_gen_discard_i64(cc_src);
366 tcg_gen_discard_i64(cc_dst);
367 tcg_gen_discard_i64(cc_vr);
369 s->cc_op = CC_OP_CONST0 + val;
372 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
374 if (live_cc_data(s)) {
375 tcg_gen_discard_i64(cc_src);
376 tcg_gen_discard_i64(cc_vr);
378 tcg_gen_mov_i64(cc_dst, dst);
379 s->cc_op = op;
382 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
383 TCGv_i64 dst)
385 if (live_cc_data(s)) {
386 tcg_gen_discard_i64(cc_vr);
388 tcg_gen_mov_i64(cc_src, src);
389 tcg_gen_mov_i64(cc_dst, dst);
390 s->cc_op = op;
393 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
394 TCGv_i64 dst, TCGv_i64 vr)
396 tcg_gen_mov_i64(cc_src, src);
397 tcg_gen_mov_i64(cc_dst, dst);
398 tcg_gen_mov_i64(cc_vr, vr);
399 s->cc_op = op;
402 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
404 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
407 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
409 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
412 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
414 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
417 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
419 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
422 /* CC value is in env->cc_op */
423 static void set_cc_static(DisasContext *s)
425 if (live_cc_data(s)) {
426 tcg_gen_discard_i64(cc_src);
427 tcg_gen_discard_i64(cc_dst);
428 tcg_gen_discard_i64(cc_vr);
430 s->cc_op = CC_OP_STATIC;
433 /* calculates cc into cc_op */
434 static void gen_op_calc_cc(DisasContext *s)
436 TCGv_i32 local_cc_op;
437 TCGv_i64 dummy;
439 TCGV_UNUSED_I32(local_cc_op);
440 TCGV_UNUSED_I64(dummy);
441 switch (s->cc_op) {
442 default:
443 dummy = tcg_const_i64(0);
444 /* FALLTHRU */
445 case CC_OP_ADD_64:
446 case CC_OP_ADDU_64:
447 case CC_OP_ADDC_64:
448 case CC_OP_SUB_64:
449 case CC_OP_SUBU_64:
450 case CC_OP_SUBB_64:
451 case CC_OP_ADD_32:
452 case CC_OP_ADDU_32:
453 case CC_OP_ADDC_32:
454 case CC_OP_SUB_32:
455 case CC_OP_SUBU_32:
456 case CC_OP_SUBB_32:
457 local_cc_op = tcg_const_i32(s->cc_op);
458 break;
459 case CC_OP_CONST0:
460 case CC_OP_CONST1:
461 case CC_OP_CONST2:
462 case CC_OP_CONST3:
463 case CC_OP_STATIC:
464 case CC_OP_DYNAMIC:
465 break;
468 switch (s->cc_op) {
469 case CC_OP_CONST0:
470 case CC_OP_CONST1:
471 case CC_OP_CONST2:
472 case CC_OP_CONST3:
473 /* s->cc_op is the cc value */
474 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
475 break;
476 case CC_OP_STATIC:
477 /* env->cc_op already is the cc value */
478 break;
479 case CC_OP_NZ:
480 case CC_OP_ABS_64:
481 case CC_OP_NABS_64:
482 case CC_OP_ABS_32:
483 case CC_OP_NABS_32:
484 case CC_OP_LTGT0_32:
485 case CC_OP_LTGT0_64:
486 case CC_OP_COMP_32:
487 case CC_OP_COMP_64:
488 case CC_OP_NZ_F32:
489 case CC_OP_NZ_F64:
490 case CC_OP_FLOGR:
491 /* 1 argument */
492 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
493 break;
494 case CC_OP_ICM:
495 case CC_OP_LTGT_32:
496 case CC_OP_LTGT_64:
497 case CC_OP_LTUGTU_32:
498 case CC_OP_LTUGTU_64:
499 case CC_OP_TM_32:
500 case CC_OP_TM_64:
501 case CC_OP_SLA_32:
502 case CC_OP_SLA_64:
503 case CC_OP_NZ_F128:
504 /* 2 arguments */
505 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
506 break;
507 case CC_OP_ADD_64:
508 case CC_OP_ADDU_64:
509 case CC_OP_ADDC_64:
510 case CC_OP_SUB_64:
511 case CC_OP_SUBU_64:
512 case CC_OP_SUBB_64:
513 case CC_OP_ADD_32:
514 case CC_OP_ADDU_32:
515 case CC_OP_ADDC_32:
516 case CC_OP_SUB_32:
517 case CC_OP_SUBU_32:
518 case CC_OP_SUBB_32:
519 /* 3 arguments */
520 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
521 break;
522 case CC_OP_DYNAMIC:
523 /* unknown operation - assume 3 arguments and cc_op in env */
524 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
525 break;
526 default:
527 tcg_abort();
530 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
531 tcg_temp_free_i32(local_cc_op);
533 if (!TCGV_IS_UNUSED_I64(dummy)) {
534 tcg_temp_free_i64(dummy);
537 /* We now have cc in cc_op as constant */
538 set_cc_static(s);
541 static int use_goto_tb(DisasContext *s, uint64_t dest)
543 /* NOTE: we handle the case where the TB spans two pages here */
544 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
545 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
546 && !s->singlestep_enabled
547 && !(s->tb->cflags & CF_LAST_IO));
550 static void account_noninline_branch(DisasContext *s, int cc_op)
552 #ifdef DEBUG_INLINE_BRANCHES
553 inline_branch_miss[cc_op]++;
554 #endif
557 static void account_inline_branch(DisasContext *s, int cc_op)
559 #ifdef DEBUG_INLINE_BRANCHES
560 inline_branch_hit[cc_op]++;
561 #endif
564 /* Table of mask values to comparison codes, given a comparison as input.
565 For such, CC=3 should not be possible. */
566 static const TCGCond ltgt_cond[16] = {
567 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
568 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
569 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
570 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
571 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
572 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
573 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
574 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
577 /* Table of mask values to comparison codes, given a logic op as input.
578 For such, only CC=0 and CC=1 should be possible. */
579 static const TCGCond nz_cond[16] = {
580 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
581 TCG_COND_NEVER, TCG_COND_NEVER,
582 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
583 TCG_COND_NE, TCG_COND_NE,
584 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
585 TCG_COND_EQ, TCG_COND_EQ,
586 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
587 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
590 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
591 details required to generate a TCG comparison. */
592 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
594 TCGCond cond;
595 enum cc_op old_cc_op = s->cc_op;
597 if (mask == 15 || mask == 0) {
598 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
599 c->u.s32.a = cc_op;
600 c->u.s32.b = cc_op;
601 c->g1 = c->g2 = true;
602 c->is_64 = false;
603 return;
606 /* Find the TCG condition for the mask + cc op. */
607 switch (old_cc_op) {
608 case CC_OP_LTGT0_32:
609 case CC_OP_LTGT0_64:
610 case CC_OP_LTGT_32:
611 case CC_OP_LTGT_64:
612 cond = ltgt_cond[mask];
613 if (cond == TCG_COND_NEVER) {
614 goto do_dynamic;
616 account_inline_branch(s, old_cc_op);
617 break;
619 case CC_OP_LTUGTU_32:
620 case CC_OP_LTUGTU_64:
621 cond = tcg_unsigned_cond(ltgt_cond[mask]);
622 if (cond == TCG_COND_NEVER) {
623 goto do_dynamic;
625 account_inline_branch(s, old_cc_op);
626 break;
628 case CC_OP_NZ:
629 cond = nz_cond[mask];
630 if (cond == TCG_COND_NEVER) {
631 goto do_dynamic;
633 account_inline_branch(s, old_cc_op);
634 break;
636 case CC_OP_TM_32:
637 case CC_OP_TM_64:
638 switch (mask) {
639 case 8:
640 cond = TCG_COND_EQ;
641 break;
642 case 4 | 2 | 1:
643 cond = TCG_COND_NE;
644 break;
645 default:
646 goto do_dynamic;
648 account_inline_branch(s, old_cc_op);
649 break;
651 case CC_OP_ICM:
652 switch (mask) {
653 case 8:
654 cond = TCG_COND_EQ;
655 break;
656 case 4 | 2 | 1:
657 case 4 | 2:
658 cond = TCG_COND_NE;
659 break;
660 default:
661 goto do_dynamic;
663 account_inline_branch(s, old_cc_op);
664 break;
666 case CC_OP_FLOGR:
667 switch (mask & 0xa) {
668 case 8: /* src == 0 -> no one bit found */
669 cond = TCG_COND_EQ;
670 break;
671 case 2: /* src != 0 -> one bit found */
672 cond = TCG_COND_NE;
673 break;
674 default:
675 goto do_dynamic;
677 account_inline_branch(s, old_cc_op);
678 break;
680 case CC_OP_ADDU_32:
681 case CC_OP_ADDU_64:
682 switch (mask) {
683 case 8 | 2: /* vr == 0 */
684 cond = TCG_COND_EQ;
685 break;
686 case 4 | 1: /* vr != 0 */
687 cond = TCG_COND_NE;
688 break;
689 case 8 | 4: /* no carry -> vr >= src */
690 cond = TCG_COND_GEU;
691 break;
692 case 2 | 1: /* carry -> vr < src */
693 cond = TCG_COND_LTU;
694 break;
695 default:
696 goto do_dynamic;
698 account_inline_branch(s, old_cc_op);
699 break;
701 case CC_OP_SUBU_32:
702 case CC_OP_SUBU_64:
703 /* Note that CC=0 is impossible; treat it as dont-care. */
704 switch (mask & 7) {
705 case 2: /* zero -> op1 == op2 */
706 cond = TCG_COND_EQ;
707 break;
708 case 4 | 1: /* !zero -> op1 != op2 */
709 cond = TCG_COND_NE;
710 break;
711 case 4: /* borrow (!carry) -> op1 < op2 */
712 cond = TCG_COND_LTU;
713 break;
714 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
715 cond = TCG_COND_GEU;
716 break;
717 default:
718 goto do_dynamic;
720 account_inline_branch(s, old_cc_op);
721 break;
723 default:
724 do_dynamic:
725 /* Calculate cc value. */
726 gen_op_calc_cc(s);
727 /* FALLTHRU */
729 case CC_OP_STATIC:
730 /* Jump based on CC. We'll load up the real cond below;
731 the assignment here merely avoids a compiler warning. */
732 account_noninline_branch(s, old_cc_op);
733 old_cc_op = CC_OP_STATIC;
734 cond = TCG_COND_NEVER;
735 break;
738 /* Load up the arguments of the comparison. */
739 c->is_64 = true;
740 c->g1 = c->g2 = false;
741 switch (old_cc_op) {
742 case CC_OP_LTGT0_32:
743 c->is_64 = false;
744 c->u.s32.a = tcg_temp_new_i32();
745 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
746 c->u.s32.b = tcg_const_i32(0);
747 break;
748 case CC_OP_LTGT_32:
749 case CC_OP_LTUGTU_32:
750 case CC_OP_SUBU_32:
751 c->is_64 = false;
752 c->u.s32.a = tcg_temp_new_i32();
753 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
754 c->u.s32.b = tcg_temp_new_i32();
755 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
756 break;
758 case CC_OP_LTGT0_64:
759 case CC_OP_NZ:
760 case CC_OP_FLOGR:
761 c->u.s64.a = cc_dst;
762 c->u.s64.b = tcg_const_i64(0);
763 c->g1 = true;
764 break;
765 case CC_OP_LTGT_64:
766 case CC_OP_LTUGTU_64:
767 case CC_OP_SUBU_64:
768 c->u.s64.a = cc_src;
769 c->u.s64.b = cc_dst;
770 c->g1 = c->g2 = true;
771 break;
773 case CC_OP_TM_32:
774 case CC_OP_TM_64:
775 case CC_OP_ICM:
776 c->u.s64.a = tcg_temp_new_i64();
777 c->u.s64.b = tcg_const_i64(0);
778 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
779 break;
781 case CC_OP_ADDU_32:
782 c->is_64 = false;
783 c->u.s32.a = tcg_temp_new_i32();
784 c->u.s32.b = tcg_temp_new_i32();
785 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
786 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
787 tcg_gen_movi_i32(c->u.s32.b, 0);
788 } else {
789 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
791 break;
793 case CC_OP_ADDU_64:
794 c->u.s64.a = cc_vr;
795 c->g1 = true;
796 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
797 c->u.s64.b = tcg_const_i64(0);
798 } else {
799 c->u.s64.b = cc_src;
800 c->g2 = true;
802 break;
804 case CC_OP_STATIC:
805 c->is_64 = false;
806 c->u.s32.a = cc_op;
807 c->g1 = true;
808 switch (mask) {
809 case 0x8 | 0x4 | 0x2: /* cc != 3 */
810 cond = TCG_COND_NE;
811 c->u.s32.b = tcg_const_i32(3);
812 break;
813 case 0x8 | 0x4 | 0x1: /* cc != 2 */
814 cond = TCG_COND_NE;
815 c->u.s32.b = tcg_const_i32(2);
816 break;
817 case 0x8 | 0x2 | 0x1: /* cc != 1 */
818 cond = TCG_COND_NE;
819 c->u.s32.b = tcg_const_i32(1);
820 break;
821 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
822 cond = TCG_COND_EQ;
823 c->g1 = false;
824 c->u.s32.a = tcg_temp_new_i32();
825 c->u.s32.b = tcg_const_i32(0);
826 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
827 break;
828 case 0x8 | 0x4: /* cc < 2 */
829 cond = TCG_COND_LTU;
830 c->u.s32.b = tcg_const_i32(2);
831 break;
832 case 0x8: /* cc == 0 */
833 cond = TCG_COND_EQ;
834 c->u.s32.b = tcg_const_i32(0);
835 break;
836 case 0x4 | 0x2 | 0x1: /* cc != 0 */
837 cond = TCG_COND_NE;
838 c->u.s32.b = tcg_const_i32(0);
839 break;
840 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
841 cond = TCG_COND_NE;
842 c->g1 = false;
843 c->u.s32.a = tcg_temp_new_i32();
844 c->u.s32.b = tcg_const_i32(0);
845 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
846 break;
847 case 0x4: /* cc == 1 */
848 cond = TCG_COND_EQ;
849 c->u.s32.b = tcg_const_i32(1);
850 break;
851 case 0x2 | 0x1: /* cc > 1 */
852 cond = TCG_COND_GTU;
853 c->u.s32.b = tcg_const_i32(1);
854 break;
855 case 0x2: /* cc == 2 */
856 cond = TCG_COND_EQ;
857 c->u.s32.b = tcg_const_i32(2);
858 break;
859 case 0x1: /* cc == 3 */
860 cond = TCG_COND_EQ;
861 c->u.s32.b = tcg_const_i32(3);
862 break;
863 default:
864 /* CC is masked by something else: (8 >> cc) & mask. */
865 cond = TCG_COND_NE;
866 c->g1 = false;
867 c->u.s32.a = tcg_const_i32(8);
868 c->u.s32.b = tcg_const_i32(0);
869 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
870 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
871 break;
873 break;
875 default:
876 abort();
878 c->cond = cond;
881 static void free_compare(DisasCompare *c)
883 if (!c->g1) {
884 if (c->is_64) {
885 tcg_temp_free_i64(c->u.s64.a);
886 } else {
887 tcg_temp_free_i32(c->u.s32.a);
890 if (!c->g2) {
891 if (c->is_64) {
892 tcg_temp_free_i64(c->u.s64.b);
893 } else {
894 tcg_temp_free_i32(c->u.s32.b);
899 /* ====================================================================== */
900 /* Define the insn format enumeration. */
901 #define F0(N) FMT_##N,
902 #define F1(N, X1) F0(N)
903 #define F2(N, X1, X2) F0(N)
904 #define F3(N, X1, X2, X3) F0(N)
905 #define F4(N, X1, X2, X3, X4) F0(N)
906 #define F5(N, X1, X2, X3, X4, X5) F0(N)
908 typedef enum {
909 #include "insn-format.def"
910 } DisasFormat;
912 #undef F0
913 #undef F1
914 #undef F2
915 #undef F3
916 #undef F4
917 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact slot assignment: fields that never co-occur in one format
   share a slot in DisasFields.c[].  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
979 struct DisasFields {
980 unsigned op:8;
981 unsigned op2:8;
982 unsigned presentC:16;
983 unsigned int presentO;
984 int c[NUM_C_FIELD];
987 /* This is the way fields are to be accessed out of DisasFields. */
/* Both macros take a field name token (r1, d2, ...) and expand to the
   matching FLD_O_/FLD_C_ enumerators.  */
988 #define have_field(S, F) have_field1((S), FLD_O_##F)
989 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
991 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
993 return (f->presentO >> c) & 1;
996 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
997 enum DisasFieldIndexC c)
999 assert(have_field1(f, o));
1000 return f->c[c];
1003 /* Describe the layout of each field in each format. */
1004 typedef struct DisasField {
1005 unsigned int beg:8;
1006 unsigned int size:8;
1007 unsigned int type:2;
1008 unsigned int indexC:6;
1009 enum DisasFieldIndexO indexO:8;
1010 } DisasField;
1012 typedef struct DisasFormatInfo {
1013 DisasField op[NUM_C_FIELD];
1014 } DisasFormatInfo;
1016 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1017 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1018 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1019 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1020 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1021 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1022 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1023 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1024 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1025 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1027 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1028 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1029 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1031 #define F0(N) { { } },
1032 #define F1(N, X1) { { X1 } },
1033 #define F2(N, X1, X2) { { X1, X2 } },
1034 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1035 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1036 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1038 static const DisasFormatInfo format_info[] = {
1039 #include "insn-format.def"
1042 #undef F0
1043 #undef F1
1044 #undef F2
1045 #undef F3
1046 #undef F4
1047 #undef F5
1048 #undef R
1049 #undef M
1050 #undef BD
1051 #undef BXD
1052 #undef BDL
1053 #undef BXDL
1054 #undef I
1055 #undef L
1057 /* Generally, we'll extract operands into this structures, operate upon
1058 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1059 of routines below for more details. */
1060 typedef struct {
1061 bool g_out, g_out2, g_in1, g_in2;
1062 TCGv_i64 out, out2, in1, in2;
1063 TCGv_i64 addr1;
1064 } DisasOps;
1066 /* Instructions can place constraints on their operands, raising specification
1067    exceptions if they are violated.  To make this easy to automate, each "in1",
1068    "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1069    of the following, or 0.  To make this easy to document, we'll put the
1070    SPEC_<name> defines next to <name>. */

/* Bit flags; an insn's spec field may OR several together.  */
1072 #define SPEC_r1_even 1
1073 #define SPEC_r2_even 2
1074 #define SPEC_r3_even 4
1075 #define SPEC_r1_f128 8
1076 #define SPEC_r2_f128 16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* Architecture facility an instruction belongs to; used to gate decoding.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1119 struct DisasInsn {
1120 unsigned opc:16;
1121 DisasFormat fmt:8;
1122 DisasFacility fac:8;
1123 unsigned spec:8;
1125 const char *name;
1127 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1128 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1129 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1130 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1131 void (*help_cout)(DisasContext *, DisasOps *);
1132 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1134 uint64_t data;
1137 /* ====================================================================== */
1138 /* Miscellaneous helpers, used by several operations. */
1140 static void help_l2_shift(DisasContext *s, DisasFields *f,
1141 DisasOps *o, int mask)
1143 int b2 = get_field(f, b2);
1144 int d2 = get_field(f, d2);
1146 if (b2 == 0) {
1147 o->in2 = tcg_const_i64(d2 & mask);
1148 } else {
1149 o->in2 = get_address(s, 0, b2, d2);
1150 tcg_gen_andi_i64(o->in2, o->in2, mask);
1154 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1156 if (dest == s->next_pc) {
1157 return NO_EXIT;
1159 if (use_goto_tb(s, dest)) {
1160 update_cc_op(s);
1161 tcg_gen_goto_tb(0);
1162 tcg_gen_movi_i64(psw_addr, dest);
1163 tcg_gen_exit_tb((uintptr_t)s->tb);
1164 return EXIT_GOTO_TB;
1165 } else {
1166 tcg_gen_movi_i64(psw_addr, dest);
1167 return EXIT_PC_UPDATED;
1171 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1172 bool is_imm, int imm, TCGv_i64 cdest)
1174 ExitStatus ret;
1175 uint64_t dest = s->pc + 2 * imm;
1176 int lab;
1178 /* Take care of the special cases first. */
1179 if (c->cond == TCG_COND_NEVER) {
1180 ret = NO_EXIT;
1181 goto egress;
1183 if (is_imm) {
1184 if (dest == s->next_pc) {
1185 /* Branch to next. */
1186 ret = NO_EXIT;
1187 goto egress;
1189 if (c->cond == TCG_COND_ALWAYS) {
1190 ret = help_goto_direct(s, dest);
1191 goto egress;
1193 } else {
1194 if (TCGV_IS_UNUSED_I64(cdest)) {
1195 /* E.g. bcr %r0 -> no branch. */
1196 ret = NO_EXIT;
1197 goto egress;
1199 if (c->cond == TCG_COND_ALWAYS) {
1200 tcg_gen_mov_i64(psw_addr, cdest);
1201 ret = EXIT_PC_UPDATED;
1202 goto egress;
1206 if (use_goto_tb(s, s->next_pc)) {
1207 if (is_imm && use_goto_tb(s, dest)) {
1208 /* Both exits can use goto_tb. */
1209 update_cc_op(s);
1211 lab = gen_new_label();
1212 if (c->is_64) {
1213 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1214 } else {
1215 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1218 /* Branch not taken. */
1219 tcg_gen_goto_tb(0);
1220 tcg_gen_movi_i64(psw_addr, s->next_pc);
1221 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1223 /* Branch taken. */
1224 gen_set_label(lab);
1225 tcg_gen_goto_tb(1);
1226 tcg_gen_movi_i64(psw_addr, dest);
1227 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1229 ret = EXIT_GOTO_TB;
1230 } else {
1231 /* Fallthru can use goto_tb, but taken branch cannot. */
1232 /* Store taken branch destination before the brcond. This
1233 avoids having to allocate a new local temp to hold it.
1234 We'll overwrite this in the not taken case anyway. */
1235 if (!is_imm) {
1236 tcg_gen_mov_i64(psw_addr, cdest);
1239 lab = gen_new_label();
1240 if (c->is_64) {
1241 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1242 } else {
1243 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1246 /* Branch not taken. */
1247 update_cc_op(s);
1248 tcg_gen_goto_tb(0);
1249 tcg_gen_movi_i64(psw_addr, s->next_pc);
1250 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1252 gen_set_label(lab);
1253 if (is_imm) {
1254 tcg_gen_movi_i64(psw_addr, dest);
1256 ret = EXIT_PC_UPDATED;
1258 } else {
1259 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1260 Most commonly we're single-stepping or some other condition that
1261 disables all use of goto_tb. Just update the PC and exit. */
1263 TCGv_i64 next = tcg_const_i64(s->next_pc);
1264 if (is_imm) {
1265 cdest = tcg_const_i64(dest);
1268 if (c->is_64) {
1269 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1270 cdest, next);
1271 } else {
1272 TCGv_i32 t0 = tcg_temp_new_i32();
1273 TCGv_i64 t1 = tcg_temp_new_i64();
1274 TCGv_i64 z = tcg_const_i64(0);
1275 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1276 tcg_gen_extu_i32_i64(t1, t0);
1277 tcg_temp_free_i32(t0);
1278 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1279 tcg_temp_free_i64(t1);
1280 tcg_temp_free_i64(z);
1283 if (is_imm) {
1284 tcg_temp_free_i64(cdest);
1286 tcg_temp_free_i64(next);
1288 ret = EXIT_PC_UPDATED;
1291 egress:
1292 free_compare(c);
1293 return ret;
1296 /* ====================================================================== */
1297 /* The operations. These perform the bulk of the work for any insn,
1298 usually after the operands have been loaded and output initialized. */
1300 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1302 gen_helper_abs_i64(o->out, o->in2);
1303 return NO_EXIT;
1306 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1308 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1309 return NO_EXIT;
1312 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1314 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1315 return NO_EXIT;
1318 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1320 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1321 tcg_gen_mov_i64(o->out2, o->in2);
1322 return NO_EXIT;
1325 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1327 tcg_gen_add_i64(o->out, o->in1, o->in2);
1328 return NO_EXIT;
1331 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1333 DisasCompare cmp;
1334 TCGv_i64 carry;
1336 tcg_gen_add_i64(o->out, o->in1, o->in2);
1338 /* The carry flag is the msb of CC, therefore the branch mask that would
1339 create that comparison is 3. Feeding the generated comparison to
1340 setcond produces the carry flag that we desire. */
1341 disas_jcc(s, &cmp, 3);
1342 carry = tcg_temp_new_i64();
1343 if (cmp.is_64) {
1344 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1345 } else {
1346 TCGv_i32 t = tcg_temp_new_i32();
1347 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1348 tcg_gen_extu_i32_i64(carry, t);
1349 tcg_temp_free_i32(t);
1351 free_compare(&cmp);
1353 tcg_gen_add_i64(o->out, o->out, carry);
1354 tcg_temp_free_i64(carry);
1355 return NO_EXIT;
1358 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1360 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1361 return NO_EXIT;
1364 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1366 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1367 return NO_EXIT;
1370 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1372 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1373 return_low128(o->out2);
1374 return NO_EXIT;
1377 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1379 tcg_gen_and_i64(o->out, o->in1, o->in2);
1380 return NO_EXIT;
1383 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1385 int shift = s->insn->data & 0xff;
1386 int size = s->insn->data >> 8;
1387 uint64_t mask = ((1ull << size) - 1) << shift;
1389 assert(!o->g_in2);
1390 tcg_gen_shli_i64(o->in2, o->in2, shift);
1391 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1392 tcg_gen_and_i64(o->out, o->in1, o->in2);
1394 /* Produce the CC from only the bits manipulated. */
1395 tcg_gen_andi_i64(cc_dst, o->out, mask);
1396 set_cc_nz_u64(s, cc_dst);
1397 return NO_EXIT;
1400 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1402 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1403 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1404 tcg_gen_mov_i64(psw_addr, o->in2);
1405 return EXIT_PC_UPDATED;
1406 } else {
1407 return NO_EXIT;
1411 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1413 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1414 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1417 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1419 int m1 = get_field(s->fields, m1);
1420 bool is_imm = have_field(s->fields, i2);
1421 int imm = is_imm ? get_field(s->fields, i2) : 0;
1422 DisasCompare c;
1424 disas_jcc(s, &c, m1);
1425 return help_branch(s, &c, is_imm, imm, o->in2);
1428 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1430 int r1 = get_field(s->fields, r1);
1431 bool is_imm = have_field(s->fields, i2);
1432 int imm = is_imm ? get_field(s->fields, i2) : 0;
1433 DisasCompare c;
1434 TCGv_i64 t;
1436 c.cond = TCG_COND_NE;
1437 c.is_64 = false;
1438 c.g1 = false;
1439 c.g2 = false;
1441 t = tcg_temp_new_i64();
1442 tcg_gen_subi_i64(t, regs[r1], 1);
1443 store_reg32_i64(r1, t);
1444 c.u.s32.a = tcg_temp_new_i32();
1445 c.u.s32.b = tcg_const_i32(0);
1446 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1447 tcg_temp_free_i64(t);
1449 return help_branch(s, &c, is_imm, imm, o->in2);
1452 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1454 int r1 = get_field(s->fields, r1);
1455 bool is_imm = have_field(s->fields, i2);
1456 int imm = is_imm ? get_field(s->fields, i2) : 0;
1457 DisasCompare c;
1459 c.cond = TCG_COND_NE;
1460 c.is_64 = true;
1461 c.g1 = true;
1462 c.g2 = false;
1464 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1465 c.u.s64.a = regs[r1];
1466 c.u.s64.b = tcg_const_i64(0);
1468 return help_branch(s, &c, is_imm, imm, o->in2);
1471 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1473 int r1 = get_field(s->fields, r1);
1474 int r3 = get_field(s->fields, r3);
1475 bool is_imm = have_field(s->fields, i2);
1476 int imm = is_imm ? get_field(s->fields, i2) : 0;
1477 DisasCompare c;
1478 TCGv_i64 t;
1480 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1481 c.is_64 = false;
1482 c.g1 = false;
1483 c.g2 = false;
1485 t = tcg_temp_new_i64();
1486 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1487 c.u.s32.a = tcg_temp_new_i32();
1488 c.u.s32.b = tcg_temp_new_i32();
1489 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1490 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1491 store_reg32_i64(r1, t);
1492 tcg_temp_free_i64(t);
1494 return help_branch(s, &c, is_imm, imm, o->in2);
1497 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1499 int r1 = get_field(s->fields, r1);
1500 int r3 = get_field(s->fields, r3);
1501 bool is_imm = have_field(s->fields, i2);
1502 int imm = is_imm ? get_field(s->fields, i2) : 0;
1503 DisasCompare c;
1505 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1506 c.is_64 = true;
1508 if (r1 == (r3 | 1)) {
1509 c.u.s64.b = load_reg(r3 | 1);
1510 c.g2 = false;
1511 } else {
1512 c.u.s64.b = regs[r3 | 1];
1513 c.g2 = true;
1516 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1517 c.u.s64.a = regs[r1];
1518 c.g1 = true;
1520 return help_branch(s, &c, is_imm, imm, o->in2);
1523 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1525 int imm, m3 = get_field(s->fields, m3);
1526 bool is_imm;
1527 DisasCompare c;
1529 c.cond = ltgt_cond[m3];
1530 if (s->insn->data) {
1531 c.cond = tcg_unsigned_cond(c.cond);
1533 c.is_64 = c.g1 = c.g2 = true;
1534 c.u.s64.a = o->in1;
1535 c.u.s64.b = o->in2;
1537 is_imm = have_field(s->fields, i4);
1538 if (is_imm) {
1539 imm = get_field(s->fields, i4);
1540 } else {
1541 imm = 0;
1542 o->out = get_address(s, 0, get_field(s->fields, b4),
1543 get_field(s->fields, d4));
1546 return help_branch(s, &c, is_imm, imm, o->out);
1549 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1551 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1552 set_cc_static(s);
1553 return NO_EXIT;
1556 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1558 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1559 set_cc_static(s);
1560 return NO_EXIT;
1563 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1565 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1566 set_cc_static(s);
1567 return NO_EXIT;
1570 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1572 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1573 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1574 tcg_temp_free_i32(m3);
1575 gen_set_cc_nz_f32(s, o->in2);
1576 return NO_EXIT;
1579 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1581 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1582 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1583 tcg_temp_free_i32(m3);
1584 gen_set_cc_nz_f64(s, o->in2);
1585 return NO_EXIT;
1588 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1590 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1591 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1592 tcg_temp_free_i32(m3);
1593 gen_set_cc_nz_f128(s, o->in1, o->in2);
1594 return NO_EXIT;
1597 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1599 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1600 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1601 tcg_temp_free_i32(m3);
1602 gen_set_cc_nz_f32(s, o->in2);
1603 return NO_EXIT;
1606 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1608 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1609 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1610 tcg_temp_free_i32(m3);
1611 gen_set_cc_nz_f64(s, o->in2);
1612 return NO_EXIT;
1615 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1617 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1618 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1619 tcg_temp_free_i32(m3);
1620 gen_set_cc_nz_f128(s, o->in1, o->in2);
1621 return NO_EXIT;
1624 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1626 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1627 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1628 tcg_temp_free_i32(m3);
1629 gen_set_cc_nz_f32(s, o->in2);
1630 return NO_EXIT;
1633 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1635 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1636 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1637 tcg_temp_free_i32(m3);
1638 gen_set_cc_nz_f64(s, o->in2);
1639 return NO_EXIT;
1642 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1644 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1645 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1646 tcg_temp_free_i32(m3);
1647 gen_set_cc_nz_f128(s, o->in1, o->in2);
1648 return NO_EXIT;
1651 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1653 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1654 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1655 tcg_temp_free_i32(m3);
1656 gen_set_cc_nz_f32(s, o->in2);
1657 return NO_EXIT;
1660 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1662 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1663 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1664 tcg_temp_free_i32(m3);
1665 gen_set_cc_nz_f64(s, o->in2);
1666 return NO_EXIT;
1669 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1671 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1672 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1673 tcg_temp_free_i32(m3);
1674 gen_set_cc_nz_f128(s, o->in1, o->in2);
1675 return NO_EXIT;
1678 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1680 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1681 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1682 tcg_temp_free_i32(m3);
1683 return NO_EXIT;
1686 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1688 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1689 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1690 tcg_temp_free_i32(m3);
1691 return NO_EXIT;
1694 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1696 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1697 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1698 tcg_temp_free_i32(m3);
1699 return_low128(o->out2);
1700 return NO_EXIT;
1703 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1705 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1706 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1707 tcg_temp_free_i32(m3);
1708 return NO_EXIT;
1711 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1713 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1714 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1715 tcg_temp_free_i32(m3);
1716 return NO_EXIT;
1719 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1721 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1722 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1723 tcg_temp_free_i32(m3);
1724 return_low128(o->out2);
1725 return NO_EXIT;
1728 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1730 int r2 = get_field(s->fields, r2);
1731 TCGv_i64 len = tcg_temp_new_i64();
1733 potential_page_fault(s);
1734 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1735 set_cc_static(s);
1736 return_low128(o->out);
1738 tcg_gen_add_i64(regs[r2], regs[r2], len);
1739 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1740 tcg_temp_free_i64(len);
1742 return NO_EXIT;
1745 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1747 int l = get_field(s->fields, l1);
1748 TCGv_i32 vl;
1750 switch (l + 1) {
1751 case 1:
1752 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1753 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1754 break;
1755 case 2:
1756 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1757 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1758 break;
1759 case 4:
1760 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1761 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1762 break;
1763 case 8:
1764 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1765 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1766 break;
1767 default:
1768 potential_page_fault(s);
1769 vl = tcg_const_i32(l);
1770 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1771 tcg_temp_free_i32(vl);
1772 set_cc_static(s);
1773 return NO_EXIT;
1775 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1776 return NO_EXIT;
1779 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1781 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1782 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1783 potential_page_fault(s);
1784 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1785 tcg_temp_free_i32(r1);
1786 tcg_temp_free_i32(r3);
1787 set_cc_static(s);
1788 return NO_EXIT;
1791 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1793 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1794 TCGv_i32 t1 = tcg_temp_new_i32();
1795 tcg_gen_trunc_i64_i32(t1, o->in1);
1796 potential_page_fault(s);
1797 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1798 set_cc_static(s);
1799 tcg_temp_free_i32(t1);
1800 tcg_temp_free_i32(m3);
1801 return NO_EXIT;
1804 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1806 potential_page_fault(s);
1807 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1808 set_cc_static(s);
1809 return_low128(o->in2);
1810 return NO_EXIT;
1813 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1815 TCGv_i64 t = tcg_temp_new_i64();
1816 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1817 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1818 tcg_gen_or_i64(o->out, o->out, t);
1819 tcg_temp_free_i64(t);
1820 return NO_EXIT;
1823 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1825 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1826 int d2 = get_field(s->fields, d2);
1827 int b2 = get_field(s->fields, b2);
1828 int is_64 = s->insn->data;
1829 TCGv_i64 addr, mem, cc, z;
1831 /* Note that in1 = R3 (new value) and
1832 in2 = (zero-extended) R1 (expected value). */
1834 /* Load the memory into the (temporary) output. While the PoO only talks
1835 about moving the memory to R1 on inequality, if we include equality it
1836 means that R1 is equal to the memory in all conditions. */
1837 addr = get_address(s, 0, b2, d2);
1838 if (is_64) {
1839 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1840 } else {
1841 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1844 /* Are the memory and expected values (un)equal? Note that this setcond
1845 produces the output CC value, thus the NE sense of the test. */
1846 cc = tcg_temp_new_i64();
1847 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1849 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1850 Recall that we are allowed to unconditionally issue the store (and
1851 thus any possible write trap), so (re-)store the original contents
1852 of MEM in case of inequality. */
1853 z = tcg_const_i64(0);
1854 mem = tcg_temp_new_i64();
1855 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1856 if (is_64) {
1857 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1858 } else {
1859 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1861 tcg_temp_free_i64(z);
1862 tcg_temp_free_i64(mem);
1863 tcg_temp_free_i64(addr);
1865 /* Store CC back to cc_op. Wait until after the store so that any
1866 exception gets the old cc_op value. */
1867 tcg_gen_trunc_i64_i32(cc_op, cc);
1868 tcg_temp_free_i64(cc);
1869 set_cc_static(s);
1870 return NO_EXIT;
1873 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1875 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1876 int r1 = get_field(s->fields, r1);
1877 int r3 = get_field(s->fields, r3);
1878 int d2 = get_field(s->fields, d2);
1879 int b2 = get_field(s->fields, b2);
1880 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1882 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1884 addrh = get_address(s, 0, b2, d2);
1885 addrl = get_address(s, 0, b2, d2 + 8);
1886 outh = tcg_temp_new_i64();
1887 outl = tcg_temp_new_i64();
1889 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
1890 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
1892 /* Fold the double-word compare with arithmetic. */
1893 cc = tcg_temp_new_i64();
1894 z = tcg_temp_new_i64();
1895 tcg_gen_xor_i64(cc, outh, regs[r1]);
1896 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
1897 tcg_gen_or_i64(cc, cc, z);
1898 tcg_gen_movi_i64(z, 0);
1899 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
1901 memh = tcg_temp_new_i64();
1902 meml = tcg_temp_new_i64();
1903 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
1904 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
1905 tcg_temp_free_i64(z);
1907 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
1908 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
1909 tcg_temp_free_i64(memh);
1910 tcg_temp_free_i64(meml);
1911 tcg_temp_free_i64(addrh);
1912 tcg_temp_free_i64(addrl);
1914 /* Save back state now that we've passed all exceptions. */
1915 tcg_gen_mov_i64(regs[r1], outh);
1916 tcg_gen_mov_i64(regs[r1 + 1], outl);
1917 tcg_gen_trunc_i64_i32(cc_op, cc);
1918 tcg_temp_free_i64(outh);
1919 tcg_temp_free_i64(outl);
1920 tcg_temp_free_i64(cc);
1921 set_cc_static(s);
1922 return NO_EXIT;
1925 #ifndef CONFIG_USER_ONLY
1926 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1928 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1929 check_privileged(s);
1930 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1931 tcg_temp_free_i32(r1);
1932 set_cc_static(s);
1933 return NO_EXIT;
1935 #endif
1937 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1939 TCGv_i64 t1 = tcg_temp_new_i64();
1940 TCGv_i32 t2 = tcg_temp_new_i32();
1941 tcg_gen_trunc_i64_i32(t2, o->in1);
1942 gen_helper_cvd(t1, t2);
1943 tcg_temp_free_i32(t2);
1944 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1945 tcg_temp_free_i64(t1);
1946 return NO_EXIT;
1949 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
1951 int m3 = get_field(s->fields, m3);
1952 int lab = gen_new_label();
1953 TCGv_i32 t;
1954 TCGCond c;
1956 c = tcg_invert_cond(ltgt_cond[m3]);
1957 if (s->insn->data) {
1958 c = tcg_unsigned_cond(c);
1960 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
1962 /* Set DXC to 0xff. */
1963 t = tcg_temp_new_i32();
1964 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1965 tcg_gen_ori_i32(t, t, 0xff00);
1966 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1967 tcg_temp_free_i32(t);
1969 /* Trap. */
1970 gen_program_exception(s, PGM_DATA);
1972 gen_set_label(lab);
1973 return NO_EXIT;
1976 #ifndef CONFIG_USER_ONLY
1977 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1979 TCGv_i32 tmp;
1981 check_privileged(s);
1982 potential_page_fault(s);
1984 /* We pretend the format is RX_a so that D2 is the field we want. */
1985 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1986 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1987 tcg_temp_free_i32(tmp);
1988 return NO_EXIT;
1990 #endif
1992 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1994 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1995 return_low128(o->out);
1996 return NO_EXIT;
1999 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2001 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2002 return_low128(o->out);
2003 return NO_EXIT;
2006 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2008 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2009 return_low128(o->out);
2010 return NO_EXIT;
2013 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2015 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2016 return_low128(o->out);
2017 return NO_EXIT;
2020 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2022 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2023 return NO_EXIT;
2026 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2028 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2029 return NO_EXIT;
2032 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2034 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2035 return_low128(o->out2);
2036 return NO_EXIT;
2039 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2041 int r2 = get_field(s->fields, r2);
2042 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2043 return NO_EXIT;
2046 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2048 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2049 return NO_EXIT;
2052 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2054 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2055 tb->flags, (ab)use the tb->cs_base field as the address of
2056 the template in memory, and grab 8 bits of tb->flags/cflags for
2057 the contents of the register. We would then recognize all this
2058 in gen_intermediate_code_internal, generating code for exactly
2059 one instruction. This new TB then gets executed normally.
2061 On the other hand, this seems to be mostly used for modifying
2062 MVC inside of memcpy, which needs a helper call anyway. So
2063 perhaps this doesn't bear thinking about any further. */
2065 TCGv_i64 tmp;
2067 update_psw_addr(s);
2068 update_cc_op(s);
2070 tmp = tcg_const_i64(s->next_pc);
2071 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2072 tcg_temp_free_i64(tmp);
2074 set_cc_static(s);
2075 return NO_EXIT;
2078 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2080 /* We'll use the original input for cc computation, since we get to
2081 compare that against 0, which ought to be better than comparing
2082 the real output against 64. It also lets cc_dst be a convenient
2083 temporary during our computation. */
2084 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2086 /* R1 = IN ? CLZ(IN) : 64. */
2087 gen_helper_clz(o->out, o->in2);
2089 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2090 value by 64, which is undefined. But since the shift is 64 iff the
2091 input is zero, we still get the correct result after and'ing. */
2092 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2093 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2094 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2095 return NO_EXIT;
2098 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2100 int m3 = get_field(s->fields, m3);
2101 int pos, len, base = s->insn->data;
2102 TCGv_i64 tmp = tcg_temp_new_i64();
2103 uint64_t ccm;
2105 switch (m3) {
2106 case 0xf:
2107 /* Effectively a 32-bit load. */
2108 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2109 len = 32;
2110 goto one_insert;
2112 case 0xc:
2113 case 0x6:
2114 case 0x3:
2115 /* Effectively a 16-bit load. */
2116 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2117 len = 16;
2118 goto one_insert;
2120 case 0x8:
2121 case 0x4:
2122 case 0x2:
2123 case 0x1:
2124 /* Effectively an 8-bit load. */
2125 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2126 len = 8;
2127 goto one_insert;
2129 one_insert:
2130 pos = base + ctz32(m3) * 8;
2131 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2132 ccm = ((1ull << len) - 1) << pos;
2133 break;
2135 default:
2136 /* This is going to be a sequence of loads and inserts. */
2137 pos = base + 32 - 8;
2138 ccm = 0;
2139 while (m3) {
2140 if (m3 & 0x8) {
2141 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2142 tcg_gen_addi_i64(o->in2, o->in2, 1);
2143 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2144 ccm |= 0xff << pos;
2146 m3 = (m3 << 1) & 0xf;
2147 pos -= 8;
2149 break;
2152 tcg_gen_movi_i64(tmp, ccm);
2153 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2154 tcg_temp_free_i64(tmp);
2155 return NO_EXIT;
2158 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2160 int shift = s->insn->data & 0xff;
2161 int size = s->insn->data >> 8;
2162 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2163 return NO_EXIT;
2166 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2168 TCGv_i64 t1;
2170 gen_op_calc_cc(s);
2171 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2173 t1 = tcg_temp_new_i64();
2174 tcg_gen_shli_i64(t1, psw_mask, 20);
2175 tcg_gen_shri_i64(t1, t1, 36);
2176 tcg_gen_or_i64(o->out, o->out, t1);
2178 tcg_gen_extu_i32_i64(t1, cc_op);
2179 tcg_gen_shli_i64(t1, t1, 28);
2180 tcg_gen_or_i64(o->out, o->out, t1);
2181 tcg_temp_free_i64(t1);
2182 return NO_EXIT;
2185 #ifndef CONFIG_USER_ONLY
2186 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2188 check_privileged(s);
2189 gen_helper_ipte(cpu_env, o->in1, o->in2);
2190 return NO_EXIT;
2193 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2195 check_privileged(s);
2196 gen_helper_iske(o->out, cpu_env, o->in2);
2197 return NO_EXIT;
2199 #endif
2201 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2203 gen_helper_ldeb(o->out, cpu_env, o->in2);
2204 return NO_EXIT;
2207 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2209 gen_helper_ledb(o->out, cpu_env, o->in2);
2210 return NO_EXIT;
2213 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2215 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2216 return NO_EXIT;
2219 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2221 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2222 return NO_EXIT;
2225 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2227 gen_helper_lxdb(o->out, cpu_env, o->in2);
2228 return_low128(o->out2);
2229 return NO_EXIT;
2232 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2234 gen_helper_lxeb(o->out, cpu_env, o->in2);
2235 return_low128(o->out2);
2236 return NO_EXIT;
2239 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2241 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2242 return NO_EXIT;
2245 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2247 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2248 return NO_EXIT;
2251 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2253 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2254 return NO_EXIT;
2257 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2259 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2260 return NO_EXIT;
2263 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2265 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2266 return NO_EXIT;
2269 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2271 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2272 return NO_EXIT;
2275 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2277 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2278 return NO_EXIT;
2281 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2283 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2284 return NO_EXIT;
2287 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2289 DisasCompare c;
2291 disas_jcc(s, &c, get_field(s->fields, m3));
2293 if (c.is_64) {
2294 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2295 o->in2, o->in1);
2296 free_compare(&c);
2297 } else {
2298 TCGv_i32 t32 = tcg_temp_new_i32();
2299 TCGv_i64 t, z;
2301 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2302 free_compare(&c);
2304 t = tcg_temp_new_i64();
2305 tcg_gen_extu_i32_i64(t, t32);
2306 tcg_temp_free_i32(t32);
2308 z = tcg_const_i64(0);
2309 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2310 tcg_temp_free_i64(t);
2311 tcg_temp_free_i64(z);
2314 return NO_EXIT;
2317 #ifndef CONFIG_USER_ONLY
2318 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2320 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2321 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2322 check_privileged(s);
2323 potential_page_fault(s);
2324 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2325 tcg_temp_free_i32(r1);
2326 tcg_temp_free_i32(r3);
2327 return NO_EXIT;
2330 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2332 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2333 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2334 check_privileged(s);
2335 potential_page_fault(s);
2336 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2337 tcg_temp_free_i32(r1);
2338 tcg_temp_free_i32(r3);
2339 return NO_EXIT;
2341 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2343 check_privileged(s);
2344 potential_page_fault(s);
2345 gen_helper_lra(o->out, cpu_env, o->in2);
2346 set_cc_static(s);
2347 return NO_EXIT;
2350 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2352 TCGv_i64 t1, t2;
2354 check_privileged(s);
2356 t1 = tcg_temp_new_i64();
2357 t2 = tcg_temp_new_i64();
2358 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2359 tcg_gen_addi_i64(o->in2, o->in2, 4);
2360 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2361 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2362 tcg_gen_shli_i64(t1, t1, 32);
2363 gen_helper_load_psw(cpu_env, t1, t2);
2364 tcg_temp_free_i64(t1);
2365 tcg_temp_free_i64(t2);
2366 return EXIT_NORETURN;
2369 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2371 TCGv_i64 t1, t2;
2373 check_privileged(s);
2375 t1 = tcg_temp_new_i64();
2376 t2 = tcg_temp_new_i64();
2377 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2378 tcg_gen_addi_i64(o->in2, o->in2, 8);
2379 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2380 gen_helper_load_psw(cpu_env, t1, t2);
2381 tcg_temp_free_i64(t1);
2382 tcg_temp_free_i64(t2);
2383 return EXIT_NORETURN;
2385 #endif
2387 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2389 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2390 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2391 potential_page_fault(s);
2392 gen_helper_lam(cpu_env, r1, o->in2, r3);
2393 tcg_temp_free_i32(r1);
2394 tcg_temp_free_i32(r3);
2395 return NO_EXIT;
2398 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2400 int r1 = get_field(s->fields, r1);
2401 int r3 = get_field(s->fields, r3);
2402 TCGv_i64 t = tcg_temp_new_i64();
2403 TCGv_i64 t4 = tcg_const_i64(4);
2405 while (1) {
2406 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2407 store_reg32_i64(r1, t);
2408 if (r1 == r3) {
2409 break;
2411 tcg_gen_add_i64(o->in2, o->in2, t4);
2412 r1 = (r1 + 1) & 15;
2415 tcg_temp_free_i64(t);
2416 tcg_temp_free_i64(t4);
2417 return NO_EXIT;
2420 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2422 int r1 = get_field(s->fields, r1);
2423 int r3 = get_field(s->fields, r3);
2424 TCGv_i64 t = tcg_temp_new_i64();
2425 TCGv_i64 t4 = tcg_const_i64(4);
2427 while (1) {
2428 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2429 store_reg32h_i64(r1, t);
2430 if (r1 == r3) {
2431 break;
2433 tcg_gen_add_i64(o->in2, o->in2, t4);
2434 r1 = (r1 + 1) & 15;
2437 tcg_temp_free_i64(t);
2438 tcg_temp_free_i64(t4);
2439 return NO_EXIT;
2442 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2444 int r1 = get_field(s->fields, r1);
2445 int r3 = get_field(s->fields, r3);
2446 TCGv_i64 t8 = tcg_const_i64(8);
2448 while (1) {
2449 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2450 if (r1 == r3) {
2451 break;
2453 tcg_gen_add_i64(o->in2, o->in2, t8);
2454 r1 = (r1 + 1) & 15;
2457 tcg_temp_free_i64(t8);
2458 return NO_EXIT;
2461 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2463 o->out = o->in2;
2464 o->g_out = o->g_in2;
2465 TCGV_UNUSED_I64(o->in2);
2466 o->g_in2 = false;
2467 return NO_EXIT;
2470 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2472 o->out = o->in1;
2473 o->out2 = o->in2;
2474 o->g_out = o->g_in1;
2475 o->g_out2 = o->g_in2;
2476 TCGV_UNUSED_I64(o->in1);
2477 TCGV_UNUSED_I64(o->in2);
2478 o->g_in1 = o->g_in2 = false;
2479 return NO_EXIT;
2482 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2484 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2485 potential_page_fault(s);
2486 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2487 tcg_temp_free_i32(l);
2488 return NO_EXIT;
2491 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2493 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2494 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2495 potential_page_fault(s);
2496 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2497 tcg_temp_free_i32(r1);
2498 tcg_temp_free_i32(r2);
2499 set_cc_static(s);
2500 return NO_EXIT;
2503 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2505 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2506 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2507 potential_page_fault(s);
2508 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2509 tcg_temp_free_i32(r1);
2510 tcg_temp_free_i32(r3);
2511 set_cc_static(s);
2512 return NO_EXIT;
2515 #ifndef CONFIG_USER_ONLY
2516 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2518 int r1 = get_field(s->fields, l1);
2519 check_privileged(s);
2520 potential_page_fault(s);
2521 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2522 set_cc_static(s);
2523 return NO_EXIT;
2526 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2528 int r1 = get_field(s->fields, l1);
2529 check_privileged(s);
2530 potential_page_fault(s);
2531 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2532 set_cc_static(s);
2533 return NO_EXIT;
2535 #endif
2537 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2539 potential_page_fault(s);
2540 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2541 set_cc_static(s);
2542 return NO_EXIT;
2545 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2547 potential_page_fault(s);
2548 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2549 set_cc_static(s);
2550 return_low128(o->in2);
2551 return NO_EXIT;
2554 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2556 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2557 return NO_EXIT;
2560 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2562 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2563 return NO_EXIT;
2566 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2568 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2569 return NO_EXIT;
2572 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2574 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2575 return NO_EXIT;
2578 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2580 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2581 return NO_EXIT;
2584 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2586 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2587 return_low128(o->out2);
2588 return NO_EXIT;
2591 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2593 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2594 return_low128(o->out2);
2595 return NO_EXIT;
2598 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2600 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2601 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2602 tcg_temp_free_i64(r3);
2603 return NO_EXIT;
2606 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2608 int r3 = get_field(s->fields, r3);
2609 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2610 return NO_EXIT;
2613 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2615 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2616 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2617 tcg_temp_free_i64(r3);
2618 return NO_EXIT;
2621 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2623 int r3 = get_field(s->fields, r3);
2624 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2625 return NO_EXIT;
2628 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2630 gen_helper_nabs_i64(o->out, o->in2);
2631 return NO_EXIT;
2634 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2636 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2637 return NO_EXIT;
2640 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2642 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2643 return NO_EXIT;
2646 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2648 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2649 tcg_gen_mov_i64(o->out2, o->in2);
2650 return NO_EXIT;
2653 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2655 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2656 potential_page_fault(s);
2657 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2658 tcg_temp_free_i32(l);
2659 set_cc_static(s);
2660 return NO_EXIT;
2663 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2665 tcg_gen_neg_i64(o->out, o->in2);
2666 return NO_EXIT;
2669 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2671 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2672 return NO_EXIT;
2675 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2677 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2678 return NO_EXIT;
2681 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2683 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2684 tcg_gen_mov_i64(o->out2, o->in2);
2685 return NO_EXIT;
2688 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2690 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2691 potential_page_fault(s);
2692 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2693 tcg_temp_free_i32(l);
2694 set_cc_static(s);
2695 return NO_EXIT;
2698 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2700 tcg_gen_or_i64(o->out, o->in1, o->in2);
2701 return NO_EXIT;
2704 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2706 int shift = s->insn->data & 0xff;
2707 int size = s->insn->data >> 8;
2708 uint64_t mask = ((1ull << size) - 1) << shift;
2710 assert(!o->g_in2);
2711 tcg_gen_shli_i64(o->in2, o->in2, shift);
2712 tcg_gen_or_i64(o->out, o->in1, o->in2);
2714 /* Produce the CC from only the bits manipulated. */
2715 tcg_gen_andi_i64(cc_dst, o->out, mask);
2716 set_cc_nz_u64(s, cc_dst);
2717 return NO_EXIT;
2720 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2722 gen_helper_popcnt(o->out, o->in2);
2723 return NO_EXIT;
2726 #ifndef CONFIG_USER_ONLY
2727 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2729 check_privileged(s);
2730 gen_helper_ptlb(cpu_env);
2731 return NO_EXIT;
2733 #endif
2735 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2737 int i3 = get_field(s->fields, i3);
2738 int i4 = get_field(s->fields, i4);
2739 int i5 = get_field(s->fields, i5);
2740 int do_zero = i4 & 0x80;
2741 uint64_t mask, imask, pmask;
2742 int pos, len, rot;
2744 /* Adjust the arguments for the specific insn. */
2745 switch (s->fields->op2) {
2746 case 0x55: /* risbg */
2747 i3 &= 63;
2748 i4 &= 63;
2749 pmask = ~0;
2750 break;
2751 case 0x5d: /* risbhg */
2752 i3 &= 31;
2753 i4 &= 31;
2754 pmask = 0xffffffff00000000ull;
2755 break;
2756 case 0x51: /* risblg */
2757 i3 &= 31;
2758 i4 &= 31;
2759 pmask = 0x00000000ffffffffull;
2760 break;
2761 default:
2762 abort();
2765 /* MASK is the set of bits to be inserted from R2.
2766 Take care for I3/I4 wraparound. */
2767 mask = pmask >> i3;
2768 if (i3 <= i4) {
2769 mask ^= pmask >> i4 >> 1;
2770 } else {
2771 mask |= ~(pmask >> i4 >> 1);
2773 mask &= pmask;
2775 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2776 insns, we need to keep the other half of the register. */
2777 imask = ~mask | ~pmask;
2778 if (do_zero) {
2779 if (s->fields->op2 == 0x55) {
2780 imask = 0;
2781 } else {
2782 imask = ~pmask;
2786 /* In some cases we can implement this with deposit, which can be more
2787 efficient on some hosts. */
2788 if (~mask == imask && i3 <= i4) {
2789 if (s->fields->op2 == 0x5d) {
2790 i3 += 32, i4 += 32;
2792 /* Note that we rotate the bits to be inserted to the lsb, not to
2793 the position as described in the PoO. */
2794 len = i4 - i3 + 1;
2795 pos = 63 - i4;
2796 rot = (i5 - pos) & 63;
2797 } else {
2798 pos = len = -1;
2799 rot = i5 & 63;
2802 /* Rotate the input as necessary. */
2803 tcg_gen_rotli_i64(o->in2, o->in2, rot);
2805 /* Insert the selected bits into the output. */
2806 if (pos >= 0) {
2807 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2808 } else if (imask == 0) {
2809 tcg_gen_andi_i64(o->out, o->in2, mask);
2810 } else {
2811 tcg_gen_andi_i64(o->in2, o->in2, mask);
2812 tcg_gen_andi_i64(o->out, o->out, imask);
2813 tcg_gen_or_i64(o->out, o->out, o->in2);
2815 return NO_EXIT;
2818 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2820 int i3 = get_field(s->fields, i3);
2821 int i4 = get_field(s->fields, i4);
2822 int i5 = get_field(s->fields, i5);
2823 uint64_t mask;
2825 /* If this is a test-only form, arrange to discard the result. */
2826 if (i3 & 0x80) {
2827 o->out = tcg_temp_new_i64();
2828 o->g_out = false;
2831 i3 &= 63;
2832 i4 &= 63;
2833 i5 &= 63;
2835 /* MASK is the set of bits to be operated on from R2.
2836 Take care for I3/I4 wraparound. */
2837 mask = ~0ull >> i3;
2838 if (i3 <= i4) {
2839 mask ^= ~0ull >> i4 >> 1;
2840 } else {
2841 mask |= ~(~0ull >> i4 >> 1);
2844 /* Rotate the input as necessary. */
2845 tcg_gen_rotli_i64(o->in2, o->in2, i5);
2847 /* Operate. */
2848 switch (s->fields->op2) {
2849 case 0x55: /* AND */
2850 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2851 tcg_gen_and_i64(o->out, o->out, o->in2);
2852 break;
2853 case 0x56: /* OR */
2854 tcg_gen_andi_i64(o->in2, o->in2, mask);
2855 tcg_gen_or_i64(o->out, o->out, o->in2);
2856 break;
2857 case 0x57: /* XOR */
2858 tcg_gen_andi_i64(o->in2, o->in2, mask);
2859 tcg_gen_xor_i64(o->out, o->out, o->in2);
2860 break;
2861 default:
2862 abort();
2865 /* Set the CC. */
2866 tcg_gen_andi_i64(cc_dst, o->out, mask);
2867 set_cc_nz_u64(s, cc_dst);
2868 return NO_EXIT;
2871 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2873 tcg_gen_bswap16_i64(o->out, o->in2);
2874 return NO_EXIT;
2877 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2879 tcg_gen_bswap32_i64(o->out, o->in2);
2880 return NO_EXIT;
2883 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2885 tcg_gen_bswap64_i64(o->out, o->in2);
2886 return NO_EXIT;
2889 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2891 TCGv_i32 t1 = tcg_temp_new_i32();
2892 TCGv_i32 t2 = tcg_temp_new_i32();
2893 TCGv_i32 to = tcg_temp_new_i32();
2894 tcg_gen_trunc_i64_i32(t1, o->in1);
2895 tcg_gen_trunc_i64_i32(t2, o->in2);
2896 tcg_gen_rotl_i32(to, t1, t2);
2897 tcg_gen_extu_i32_i64(o->out, to);
2898 tcg_temp_free_i32(t1);
2899 tcg_temp_free_i32(t2);
2900 tcg_temp_free_i32(to);
2901 return NO_EXIT;
2904 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2906 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2907 return NO_EXIT;
2910 #ifndef CONFIG_USER_ONLY
2911 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2913 check_privileged(s);
2914 gen_helper_rrbe(cc_op, cpu_env, o->in2);
2915 set_cc_static(s);
2916 return NO_EXIT;
2919 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2921 check_privileged(s);
2922 gen_helper_sacf(cpu_env, o->in2);
2923 /* Addressing mode has changed, so end the block. */
2924 return EXIT_PC_STALE;
2926 #endif
2928 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2930 int r1 = get_field(s->fields, r1);
2931 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
2932 return NO_EXIT;
2935 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2937 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2938 return NO_EXIT;
2941 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2943 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2944 return NO_EXIT;
2947 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2949 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2950 return_low128(o->out2);
2951 return NO_EXIT;
2954 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2956 gen_helper_sqeb(o->out, cpu_env, o->in2);
2957 return NO_EXIT;
2960 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2962 gen_helper_sqdb(o->out, cpu_env, o->in2);
2963 return NO_EXIT;
2966 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2968 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2969 return_low128(o->out2);
2970 return NO_EXIT;
2973 #ifndef CONFIG_USER_ONLY
2974 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2976 check_privileged(s);
2977 potential_page_fault(s);
2978 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
2979 set_cc_static(s);
2980 return NO_EXIT;
2983 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2985 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2986 check_privileged(s);
2987 potential_page_fault(s);
2988 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2989 tcg_temp_free_i32(r1);
2990 return NO_EXIT;
2992 #endif
2994 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
2996 DisasCompare c;
2997 TCGv_i64 a;
2998 int lab, r1;
3000 disas_jcc(s, &c, get_field(s->fields, m3));
3002 lab = gen_new_label();
3003 if (c.is_64) {
3004 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3005 } else {
3006 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3008 free_compare(&c);
3010 r1 = get_field(s->fields, r1);
3011 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3012 if (s->insn->data) {
3013 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3014 } else {
3015 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3017 tcg_temp_free_i64(a);
3019 gen_set_label(lab);
3020 return NO_EXIT;
3023 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3025 uint64_t sign = 1ull << s->insn->data;
3026 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3027 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3028 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3029 /* The arithmetic left shift is curious in that it does not affect
3030 the sign bit. Copy that over from the source unchanged. */
3031 tcg_gen_andi_i64(o->out, o->out, ~sign);
3032 tcg_gen_andi_i64(o->in1, o->in1, sign);
3033 tcg_gen_or_i64(o->out, o->out, o->in1);
3034 return NO_EXIT;
3037 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3039 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3040 return NO_EXIT;
3043 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3045 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3046 return NO_EXIT;
3049 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3051 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3052 return NO_EXIT;
3055 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3057 gen_helper_sfpc(cpu_env, o->in2);
3058 return NO_EXIT;
3061 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3063 gen_helper_sfas(cpu_env, o->in2);
3064 return NO_EXIT;
3067 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3069 int b2 = get_field(s->fields, b2);
3070 int d2 = get_field(s->fields, d2);
3071 TCGv_i64 t1 = tcg_temp_new_i64();
3072 TCGv_i64 t2 = tcg_temp_new_i64();
3073 int mask, pos, len;
3075 switch (s->fields->op2) {
3076 case 0x99: /* SRNM */
3077 pos = 0, len = 2;
3078 break;
3079 case 0xb8: /* SRNMB */
3080 pos = 0, len = 3;
3081 break;
3082 case 0xb9: /* SRNMT */
3083 pos = 4, len = 3;
3084 break;
3085 default:
3086 tcg_abort();
3088 mask = (1 << len) - 1;
3090 /* Insert the value into the appropriate field of the FPC. */
3091 if (b2 == 0) {
3092 tcg_gen_movi_i64(t1, d2 & mask);
3093 } else {
3094 tcg_gen_addi_i64(t1, regs[b2], d2);
3095 tcg_gen_andi_i64(t1, t1, mask);
3097 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3098 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3099 tcg_temp_free_i64(t1);
3101 /* Then install the new FPC to set the rounding mode in fpu_status. */
3102 gen_helper_sfpc(cpu_env, t2);
3103 tcg_temp_free_i64(t2);
3104 return NO_EXIT;
3107 #ifndef CONFIG_USER_ONLY
3108 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3110 check_privileged(s);
3111 tcg_gen_shri_i64(o->in2, o->in2, 4);
3112 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3113 return NO_EXIT;
3116 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3118 check_privileged(s);
3119 gen_helper_sske(cpu_env, o->in1, o->in2);
3120 return NO_EXIT;
3123 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3125 check_privileged(s);
3126 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3127 return NO_EXIT;
3130 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3132 check_privileged(s);
3133 /* ??? Surely cpu address != cpu number. In any case the previous
3134 version of this stored more than the required half-word, so it
3135 is unlikely this has ever been tested. */
3136 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3137 return NO_EXIT;
3140 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3142 gen_helper_stck(o->out, cpu_env);
3143 /* ??? We don't implement clock states. */
3144 gen_op_movi_cc(s, 0);
3145 return NO_EXIT;
3148 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3150 TCGv_i64 c1 = tcg_temp_new_i64();
3151 TCGv_i64 c2 = tcg_temp_new_i64();
3152 gen_helper_stck(c1, cpu_env);
3153 /* Shift the 64-bit value into its place as a zero-extended
3154 104-bit value. Note that "bit positions 64-103 are always
3155 non-zero so that they compare differently to STCK"; we set
3156 the least significant bit to 1. */
3157 tcg_gen_shli_i64(c2, c1, 56);
3158 tcg_gen_shri_i64(c1, c1, 8);
3159 tcg_gen_ori_i64(c2, c2, 0x10000);
3160 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3161 tcg_gen_addi_i64(o->in2, o->in2, 8);
3162 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3163 tcg_temp_free_i64(c1);
3164 tcg_temp_free_i64(c2);
3165 /* ??? We don't implement clock states. */
3166 gen_op_movi_cc(s, 0);
3167 return NO_EXIT;
3170 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3172 check_privileged(s);
3173 gen_helper_sckc(cpu_env, o->in2);
3174 return NO_EXIT;
3177 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3179 check_privileged(s);
3180 gen_helper_stckc(o->out, cpu_env);
3181 return NO_EXIT;
3184 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3186 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3187 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3188 check_privileged(s);
3189 potential_page_fault(s);
3190 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3191 tcg_temp_free_i32(r1);
3192 tcg_temp_free_i32(r3);
3193 return NO_EXIT;
3196 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3198 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3199 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3200 check_privileged(s);
3201 potential_page_fault(s);
3202 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3203 tcg_temp_free_i32(r1);
3204 tcg_temp_free_i32(r3);
3205 return NO_EXIT;
3208 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3210 check_privileged(s);
3211 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3212 return NO_EXIT;
3215 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3217 check_privileged(s);
3218 gen_helper_spt(cpu_env, o->in2);
3219 return NO_EXIT;
3222 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3224 TCGv_i64 f, a;
3225 /* We really ought to have more complete indication of facilities
3226 that we implement. Address this when STFLE is implemented. */
3227 check_privileged(s);
3228 f = tcg_const_i64(0xc0000000);
3229 a = tcg_const_i64(200);
3230 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3231 tcg_temp_free_i64(f);
3232 tcg_temp_free_i64(a);
3233 return NO_EXIT;
3236 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3238 check_privileged(s);
3239 gen_helper_stpt(o->out, cpu_env);
3240 return NO_EXIT;
3243 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3245 check_privileged(s);
3246 potential_page_fault(s);
3247 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3248 set_cc_static(s);
3249 return NO_EXIT;
3252 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3254 check_privileged(s);
3255 gen_helper_spx(cpu_env, o->in2);
3256 return NO_EXIT;
3259 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3261 check_privileged(s);
3262 /* Not operational. */
3263 gen_op_movi_cc(s, 3);
3264 return NO_EXIT;
3267 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3269 check_privileged(s);
3270 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3271 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3272 return NO_EXIT;
3275 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3277 uint64_t i2 = get_field(s->fields, i2);
3278 TCGv_i64 t;
3280 check_privileged(s);
3282 /* It is important to do what the instruction name says: STORE THEN.
3283 If we let the output hook perform the store then if we fault and
3284 restart, we'll have the wrong SYSTEM MASK in place. */
3285 t = tcg_temp_new_i64();
3286 tcg_gen_shri_i64(t, psw_mask, 56);
3287 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3288 tcg_temp_free_i64(t);
3290 if (s->fields->op == 0xac) {
3291 tcg_gen_andi_i64(psw_mask, psw_mask,
3292 (i2 << 56) | 0x00ffffffffffffffull);
3293 } else {
3294 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3296 return NO_EXIT;
3299 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3301 check_privileged(s);
3302 potential_page_fault(s);
3303 gen_helper_stura(cpu_env, o->in2, o->in1);
3304 return NO_EXIT;
3306 #endif
3308 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3310 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3311 return NO_EXIT;
3314 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3316 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3317 return NO_EXIT;
3320 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3322 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3323 return NO_EXIT;
3326 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3328 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3329 return NO_EXIT;
3332 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3334 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3335 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3336 potential_page_fault(s);
3337 gen_helper_stam(cpu_env, r1, o->in2, r3);
3338 tcg_temp_free_i32(r1);
3339 tcg_temp_free_i32(r3);
3340 return NO_EXIT;
3343 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3345 int m3 = get_field(s->fields, m3);
3346 int pos, base = s->insn->data;
3347 TCGv_i64 tmp = tcg_temp_new_i64();
3349 pos = base + ctz32(m3) * 8;
3350 switch (m3) {
3351 case 0xf:
3352 /* Effectively a 32-bit store. */
3353 tcg_gen_shri_i64(tmp, o->in1, pos);
3354 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3355 break;
3357 case 0xc:
3358 case 0x6:
3359 case 0x3:
3360 /* Effectively a 16-bit store. */
3361 tcg_gen_shri_i64(tmp, o->in1, pos);
3362 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3363 break;
3365 case 0x8:
3366 case 0x4:
3367 case 0x2:
3368 case 0x1:
3369 /* Effectively an 8-bit store. */
3370 tcg_gen_shri_i64(tmp, o->in1, pos);
3371 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3372 break;
3374 default:
3375 /* This is going to be a sequence of shifts and stores. */
3376 pos = base + 32 - 8;
3377 while (m3) {
3378 if (m3 & 0x8) {
3379 tcg_gen_shri_i64(tmp, o->in1, pos);
3380 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3381 tcg_gen_addi_i64(o->in2, o->in2, 1);
3383 m3 = (m3 << 1) & 0xf;
3384 pos -= 8;
3386 break;
3388 tcg_temp_free_i64(tmp);
3389 return NO_EXIT;
3392 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3394 int r1 = get_field(s->fields, r1);
3395 int r3 = get_field(s->fields, r3);
3396 int size = s->insn->data;
3397 TCGv_i64 tsize = tcg_const_i64(size);
3399 while (1) {
3400 if (size == 8) {
3401 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3402 } else {
3403 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3405 if (r1 == r3) {
3406 break;
3408 tcg_gen_add_i64(o->in2, o->in2, tsize);
3409 r1 = (r1 + 1) & 15;
3412 tcg_temp_free_i64(tsize);
3413 return NO_EXIT;
3416 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3418 int r1 = get_field(s->fields, r1);
3419 int r3 = get_field(s->fields, r3);
3420 TCGv_i64 t = tcg_temp_new_i64();
3421 TCGv_i64 t4 = tcg_const_i64(4);
3422 TCGv_i64 t32 = tcg_const_i64(32);
3424 while (1) {
3425 tcg_gen_shl_i64(t, regs[r1], t32);
3426 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3427 if (r1 == r3) {
3428 break;
3430 tcg_gen_add_i64(o->in2, o->in2, t4);
3431 r1 = (r1 + 1) & 15;
3434 tcg_temp_free_i64(t);
3435 tcg_temp_free_i64(t4);
3436 tcg_temp_free_i64(t32);
3437 return NO_EXIT;
3440 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3442 potential_page_fault(s);
3443 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3444 set_cc_static(s);
3445 return_low128(o->in2);
3446 return NO_EXIT;
3449 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3451 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3452 return NO_EXIT;
3455 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3457 DisasCompare cmp;
3458 TCGv_i64 borrow;
3460 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3462 /* The !borrow flag is the msb of CC. Since we want the inverse of
3463 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3464 disas_jcc(s, &cmp, 8 | 4);
3465 borrow = tcg_temp_new_i64();
3466 if (cmp.is_64) {
3467 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3468 } else {
3469 TCGv_i32 t = tcg_temp_new_i32();
3470 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3471 tcg_gen_extu_i32_i64(borrow, t);
3472 tcg_temp_free_i32(t);
3474 free_compare(&cmp);
3476 tcg_gen_sub_i64(o->out, o->out, borrow);
3477 tcg_temp_free_i64(borrow);
3478 return NO_EXIT;
3481 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3483 TCGv_i32 t;
3485 update_psw_addr(s);
3486 update_cc_op(s);
3488 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3489 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3490 tcg_temp_free_i32(t);
3492 t = tcg_const_i32(s->next_pc - s->pc);
3493 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3494 tcg_temp_free_i32(t);
3496 gen_exception(EXCP_SVC);
3497 return EXIT_NORETURN;
3500 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3502 gen_helper_tceb(cc_op, o->in1, o->in2);
3503 set_cc_static(s);
3504 return NO_EXIT;
3507 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3509 gen_helper_tcdb(cc_op, o->in1, o->in2);
3510 set_cc_static(s);
3511 return NO_EXIT;
3514 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3516 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3517 set_cc_static(s);
3518 return NO_EXIT;
3521 #ifndef CONFIG_USER_ONLY
3522 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3524 potential_page_fault(s);
3525 gen_helper_tprot(cc_op, o->addr1, o->in2);
3526 set_cc_static(s);
3527 return NO_EXIT;
3529 #endif
3531 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3533 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3534 potential_page_fault(s);
3535 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3536 tcg_temp_free_i32(l);
3537 set_cc_static(s);
3538 return NO_EXIT;
3541 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3543 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3544 potential_page_fault(s);
3545 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3546 tcg_temp_free_i32(l);
3547 return NO_EXIT;
3550 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3552 int d1 = get_field(s->fields, d1);
3553 int d2 = get_field(s->fields, d2);
3554 int b1 = get_field(s->fields, b1);
3555 int b2 = get_field(s->fields, b2);
3556 int l = get_field(s->fields, l1);
3557 TCGv_i32 t32;
3559 o->addr1 = get_address(s, 0, b1, d1);
3561 /* If the addresses are identical, this is a store/memset of zero. */
3562 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3563 o->in2 = tcg_const_i64(0);
3565 l++;
3566 while (l >= 8) {
3567 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3568 l -= 8;
3569 if (l > 0) {
3570 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3573 if (l >= 4) {
3574 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3575 l -= 4;
3576 if (l > 0) {
3577 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3580 if (l >= 2) {
3581 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3582 l -= 2;
3583 if (l > 0) {
3584 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3587 if (l) {
3588 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
3590 gen_op_movi_cc(s, 0);
3591 return NO_EXIT;
3594 /* But in general we'll defer to a helper. */
3595 o->in2 = get_address(s, 0, b2, d2);
3596 t32 = tcg_const_i32(l);
3597 potential_page_fault(s);
3598 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3599 tcg_temp_free_i32(t32);
3600 set_cc_static(s);
3601 return NO_EXIT;
3604 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3606 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3607 return NO_EXIT;
3610 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3612 int shift = s->insn->data & 0xff;
3613 int size = s->insn->data >> 8;
3614 uint64_t mask = ((1ull << size) - 1) << shift;
3616 assert(!o->g_in2);
3617 tcg_gen_shli_i64(o->in2, o->in2, shift);
3618 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3620 /* Produce the CC from only the bits manipulated. */
3621 tcg_gen_andi_i64(cc_dst, o->out, mask);
3622 set_cc_nz_u64(s, cc_dst);
3623 return NO_EXIT;
3626 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3628 o->out = tcg_const_i64(0);
3629 return NO_EXIT;
3632 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3634 o->out = tcg_const_i64(0);
3635 o->out2 = o->out;
3636 o->g_out2 = true;
3637 return NO_EXIT;
3640 /* ====================================================================== */
3641 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3642 the original inputs), update the various cc data structures in order to
3643 be able to compute the new condition code. */
/* Each helper records a CC_OP_* opcode plus the live operand/result
   values via gen_op_update{1,2,3}_cc_i64; the condition code itself is
   materialized lazily from these.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* 128-bit float: both halves of the result feed the CC computation.  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* 32-bit NZ: mask the result to its low 32 bits first.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3801 /* ====================================================================== */
3802 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3803 with the TCG register to which we will write. Used in combination with
3804 the "wout" generators, in some cases we need a new temporary, and in
3805 some cases we can write to a TCG global. */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Fresh temporary for a single 64-bit result.  */
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Fresh temporaries for a paired (128-bit) result.  */
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Write directly into the r1 global; g_out keeps it from being
       freed by translate_one.  */
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Write directly into the even/odd register pair r1/r1+1.  */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 128-bit float results live in the register pair f1/f1+2.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3852 /* ====================================================================== */
3853 /* The "Write OUTput" generators. These generally perform some non-trivial
3854 copy of data to TCG globals, or to main memory. The trivial cases are
3855 generally handled by having a "prep" generator install the TCG global
3856 as the destination of the operation. */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Replace only the low 8 bits of r1, preserving the rest.  */
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Replace only the low 16 bits of r1, preserving the rest.  */
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Two 32-bit results into the even/odd register pair r1/r1+1.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Split one 64-bit result across the pair: low half to r1+1, high
       half to r1.  Note o->out is clobbered by the shift.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 128-bit float result occupies the register pair f1/f1+2.  */
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Only write back when r1 and r2 name different registers.  */
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* The m1 variants store the result to the first-operand address.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store via the address held in in2 rather than addr1.  */
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3967 /* ====================================================================== */
3968 /* The "INput 1" generators. These load the first operand to an insn. */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* The _o variants alias a TCG global directly; g_in1 keeps
   translate_one from freeing it.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair r1/r1+1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the 32-bit halves of the r1/r1+1 pair
   (r1 supplies the high half).  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* NOTE(review): despite being an in1 generator, this fills o->out and
   o->out2 (like prep_x1) rather than o->in1 -- presumably the x1 f128
   operand is read in place as the output pair; confirm against the
   insns that use x1_o before changing.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* First-operand effective address: base b1 + displacement d1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Effective address x2 + b2 + d2, placed into addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* The m1 variants load from the first-operand address with the given
   width and extension.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4163 /* ====================================================================== */
4164 /* The "INput 2" generators. These load the second operand to an insn. */
/* The _o variants alias a TCG global directly; g_in2 keeps
   translate_one from freeing it.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit value assembled from the 32-bit halves of the r1/r1+1 pair
   (r1 supplies the high half).  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Like in2_r2, but leaves in2 unset when r2 is register 0.  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit float operand: the f2/f2+2 pair fills both in1 and in2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Address taken from register r2 (no index or displacement).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Effective address x2 + b2 + d2.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword offset from s->pc.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift counts; help_l2_shift is given the maximum (mask) of 31/63.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* The m2 variants load from the a2 effective address; in2 is reused as
   both the address and the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* The mri2 variants load from the ri2 PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate i2, with various zero-extensions and shifts.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended 16-bit immediate shifted left by insn->data bits.  */
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended 32-bit immediate shifted left by insn->data bits.  */
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4426 /* ====================================================================== */
4428 /* Find opc within the table of insns. This is formulated as a switch
4429 statement so that (1) we get compile-time notice of cut-paste errors
4430 for duplicated opcodes, and (2) the compiler generates the binary
4431 search tree, rather than us having to post-process the table. */
/* The C() form is simply D() with a zero data field.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion: each insn-data entry becomes an enumerator name.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: each entry becomes a DisasInsn initializer.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: each entry becomes a switch case mapping the 16-bit
   opcode to its insn_info slot.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-size field is a placeholder slot -- nothing to extract.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        /* Sign-extend via the xor/subtract trick on the top bit.  */
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* NOTE(review): left-shifting a negative (int8_t) value is
           formally undefined behavior; this relies on the usual
           two's-complement compiler behavior.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4531 /* Lookup the insn at the current PC, extracting the operands into O and
4532 returning the info struct for the insn. Returns NULL for invalid insn. */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes are enough to determine the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Re-read the full insn, left-aligned in the 64-bit word as the
       field definitions expect.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Translate the single instruction at s->pc: decode it, check for
   specification exceptions, run the in/prep/op/wout/cout helper
   pipeline, free the temporaries, and advance s->pc.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Even/odd pair operands must name an even register.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* f128 operands use fregs[r] and fregs[r + 2]; r > 13 would
           index past the end of the register file.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  Values flagged g_*
       alias TCG globals and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
/* Main translation loop: translate guest insns starting at tb->pc until
   the TB must be closed (branch, page crossing, opcode buffer full,
   insn budget exhausted, or single-stepping).  With SEARCH_PC set, also
   record the opc-index -> guest-state mapping used by
   restore_state_to_opc().  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    do {
        if (search_pc) {
            /* Record pc/cc_op/icount for this opc index so guest state
               can be reconstructed at any insn boundary.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this pc ends the TB without translating.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the remaining opc mapping slots.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
/* Translate a TB for execution (no opc->pc search info recorded).  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}
/* Translate a TB while recording the opc->pc mapping (search_pc).  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
4876 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4878 int cc_op;
4879 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4880 cc_op = gen_opc_cc_op[pc_pos];
4881 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4882 env->cc_op = cc_op;