target-alpha: Fix cvttq vs inf
[qemu/ar7.git] / target-s390x / translate.c
blob80e3a545e477c48f12031dbe48b4a70ee00569fe
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
/* Per-translation-block decode state, threaded through every helper below. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;      /* descriptor of the insn being translated */
    DisasFields *fields;        /* decoded operand fields of that insn */
    uint64_t pc, next_pc;       /* address of current / following insn */
    enum cc_op cc_op;           /* lazily tracked condition-code computation */
    bool singlestep_enabled;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;                 /* true: use u.s64, false: use u.s32 */
    bool g1;                    /* operand a is a global; do not free it */
    bool g2;                    /* operand b is a global; do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
/* Translator exit reason: an exception has been raised.  */
#define DISAS_EXCP 4

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches translated inline vs. via helper.  */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
88 return pc;
/* Dump PSW, general registers, FP registers and (softmmu only) control
   registers of CS to stream F, four registers per line.  */
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    /* cc_op > 3 means the CC is still in "lazy" form; print its name.  */
    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}
/* TCG globals mapping the guest PSW, lazy condition-code state and the
   register files; initialized once in s390x_translate_init().  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];   /* "r0".."r15" then "f0".."f15" */
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Saved cc_op per opcode-buffer slot, for restore_state_to_opc.  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* Create the TCG globals above, tying each to its CPUS390XState field.
   Called once at target initialization.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }
}
/* Return a fresh temporary holding a copy of GPR REG.  Caller frees.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}
/* Return a fresh temporary holding the short (high 32 bits) part of
   FPR REG, right-justified.  Caller frees.  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}
/* Store the full 64-bit value V into GPR REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
/* Store the full 64-bit value V into FPR REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
/* Store the low 32 bits of V into GPR REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
/* Store the low 32 bits of V into the HIGH half of GPR REG,
   preserving the low half (high-word facility).  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
/* Store the low 32 bits of V into the short (high) part of FPR REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
/* Fetch the low 64 bits of a 128-bit helper result from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
/* Flush the translator's current PC into the architectural PSW address.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
/* Flush the lazily-tracked cc_op into env; DYNAMIC and STATIC mean the
   value in env is already authoritative.  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
/* Before an operation that may fault, synchronize PSW address and cc_op
   so the exception sees consistent state.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
/* Fetch a 2-byte instruction unit from guest code at PC.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
/* Fetch a 4-byte instruction unit from guest code at PC (zero-extended).  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
/* Map the PSW address-space-control bits to the softmmu MMU index.  */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}
/* Raise QEMU exception EXCP via the exception helper (does not return).  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
/* Raise a program interruption with interruption code CODE, recording
   the instruction length and advancing the PSW past the instruction.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
/* Raise a specification program exception for an invalid encoding.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception if executing in problem state.  */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
/* Compute the effective address base(B2) + index(X2) + displacement D2,
   truncated to 31 bits when not in 64-bit addressing mode.  Returns a
   fresh temporary; caller frees.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
359 static inline bool live_cc_data(DisasContext *s)
361 return (s->cc_op != CC_OP_DYNAMIC
362 && s->cc_op != CC_OP_STATIC
363 && s->cc_op > 3);
/* Set the cc to the constant VAL (0..3), discarding any live cc data.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
/* Record a one-operand lazy cc computation: OP applied to DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
/* Record a two-operand lazy cc computation: OP applied to SRC, DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
/* Record a three-operand lazy cc computation: OP applied to SRC, DST, VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
/* Lazy cc: nonzero test on a 64-bit value.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
/* Lazy cc: classify a 32-bit float result (zero/negative/positive/NaN).  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
/* Lazy cc: classify a 64-bit float result.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
/* Lazy cc: classify a 128-bit float result given as high/low halves.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);

    /* First switch: decide which auxiliary constants the helper call
       below will need (the op number and/or a zero dummy operand).  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper needed, or cc_op already in env.  */
        break;
    }

    /* Second switch: materialize the cc value into the cc_op global.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
/* Whether a direct goto_tb to DEST is permissible: DEST must lie on one
   of the (up to two) pages the current TB spans, and chaining must not
   be disabled by single-stepping or pending I/O.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
/* Debug statistics: count a branch that had to go through the cc helper.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
/* Debug statistics: count a branch translated inline from lazy cc state.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because bit 0 (CC=3) is a don't-care.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Tries hard to map the
   lazy cc state directly onto a TCG condition so the cc helper call can
   be avoided; falls back to computing the cc value otherwise.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Branch-always / branch-never need no operands at all.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* cc value already computed: compare cc_op itself against MASK.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
885 static void free_compare(DisasCompare *c)
887 if (!c->g1) {
888 if (c->is_64) {
889 tcg_temp_free_i64(c->u.s64.a);
890 } else {
891 tcg_temp_free_i32(c->u.s32.a);
894 if (!c->g2) {
895 if (c->is_64) {
896 tcg_temp_free_i64(c->u.s64.b);
897 } else {
898 tcg_temp_free_i32(c->u.s32.b);
/* ====================================================================== */
/* Define the insn format enumeration.  Each FMT_* value is generated
   from one line of insn-format.def; the F1..F5 variants only differ in
   the number of field descriptors, which are ignored here.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact field indices: fields that never coexist share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
/* Decoded operand fields of one instruction.  */
struct DisasFields {
    unsigned op:8;              /* primary opcode byte */
    unsigned op2:8;             /* secondary opcode byte */
    unsigned presentC:16;       /* bitmap over compact indices */
    unsigned int presentO;      /* bitmap over original indices */
    int c[NUM_C_FIELD];         /* field values, compact-indexed */
};
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Whether original-index field C was decoded for this insn.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}
/* Fetch the value of field O (stored at compact slot C); asserts the
   field was actually present in the decoded format.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1007 /* Describe the layout of each field in each format. */
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width in bits */
    unsigned int type:2;        /* 0=unsigned, 1=signed, 2=displacement */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original field identity */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
/* Field-extraction descriptors: bit position, size, type, and the two
   indices under which the field is stored/checked.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* Per-format table of field descriptors, generated from insn-format.def.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* which values are globals (not freed) */
    TCGv_i64 out, out2, in1, in2;       /* operand values */
    TCGv_i64 addr1;                     /* computed effective address, if any */
} DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* Bit flags combined into DisasInsn.spec.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* Architectural facility an instruction belongs to; used to gate
   availability of each insn on the emulated CPU model.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
/* Static description of one instruction: its opcode, format, required
   facility, operand constraints, and the helper pipeline that loads
   inputs, performs the operation, writes outputs and computes the cc.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    DisasFacility fac:8;
    unsigned spec:8;        /* SPEC_* constraint bits */

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* insn-specific constant for the helpers */
};
1142 /* ====================================================================== */
1143 /* Miscellaneous helpers, used by several operations. */
/* Load a shift count into in2: either the immediate displacement, or
   base register + displacement, masked to the valid bit range MASK.  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
/* Emit an unconditional branch to DEST, using goto_tb chaining when the
   target is eligible.  Branch-to-next is a no-op.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
/* Emit a conditional branch governed by C.  The target is either
   S->pc + 2*IMM (is_imm) or the register value CDEST.  Chooses among
   dual goto_tb, single goto_tb + indirect, or a pure movcond exit,
   depending on what use_goto_tb allows.  Consumes C.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1301 /* ====================================================================== */
1302 /* The operations. These perform the bulk of the work for any insn,
1303 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE (64-bit integer): absolute value computed by helper. */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD POSITIVE (32-bit float): clear the sign bit by masking. */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (64-bit float): clear the sign bit by masking. */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (128-bit float): the sign bit is in the high doubleword
   (in1); the low doubleword (in2) is passed through unchanged. */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* ADD: plain 64-bit addition of the two prepared inputs. */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD WITH CARRY: out = in1 + in2 + carry-in, where the carry-in is
   recovered from the current condition code. */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3. Feeding the generated comparison to
       setcond produces the carry flag that we desire. */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the carry in 32 bits, then widen. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* ADD (short BFP): helper performs the FP add with env rounding/flags. */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP). */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands are passed as two doublewords;
   the low half of the result comes back via return_low128. */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* AND: bitwise and of the two prepared inputs. */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND IMMEDIATE against one sub-field of the register: the insn table
   encodes the field's shift in the low byte of insn->data and its width
   in the next byte.  Bits outside the field are preserved by or-ing the
   shifted immediate with ~mask before the and. */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* BRANCH AND SAVE (register form): store the link info, then branch to
   the address in in2.  An unused in2 means "branch to 0" i.e. no branch. */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* BRANCH RELATIVE AND SAVE: link, then direct branch to pc + 2*i2. */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* BRANCH ON CONDITION: build the comparison for mask m1 and hand it to
   the common branch helper; i2 present means a relative-immediate form. */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low half of r1 and branch if
   the 32-bit result is non-zero. */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON COUNT (64-bit): decrement r1 in place; compare against 0.
   g1 is true because the compare reads the global register directly. */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXLE/BXH selected by insn->data): r1 += r3,
   then branch on r1 <= / > the comparand in the odd register of the pair. */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON INDEX (64-bit).  If r1 aliases the comparand register we must
   snapshot the comparand before the add clobbers it. */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH family: compare in1 with in2 under the m3 relation
   (made unsigned when insn->data is set) and branch; the target is either
   a relative immediate (i4) or a computed b4/d4 address. */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* COMPARE (short BFP): helper writes the CC directly. */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP). */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): operands are doubleword pairs. */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Float-to-integer conversions.  Naming follows the mnemonics:
   c{f,g}{e,d,x}b = convert to {32,64}-bit signed from {short,long,ext} BFP,
   cl...b = the corresponding convert-to-logical (unsigned) forms.
   m3 carries the rounding mode field; the CC is derived from the source. */

static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Integer-to-float conversions: c{e,d,x}gb from 64-bit signed, and the
   c{e,d,x}lgb forms from 64-bit unsigned.  m3 carries the rounding mode;
   the extended-format results return their low doubleword via low128. */

static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM: helper computes the checksum and the number of bytes consumed;
   afterwards advance the address register and shrink the length register
   of the r2 pair by that count. */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (storage-to-storage): for power-of-two lengths up to 8
   the compare is inlined as two loads plus an unsigned-compare CC; any
   other length falls back to the clc helper. */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: byte-loop in the helper, CC set there. */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED: fully delegated to the helper, which
   also updates the register pairs. */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: compare selected bytes of r1
   (truncated to 32 bits) with successive storage bytes, via helper. */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* COMPARE LOGICAL STRING: helper scans using the terminator in regs[0];
   updated addresses come back in in1 and (via low128) in2. */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* COPY SIGN: take the sign bit from in1 and the magnitude from in2. */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* COMPARE AND SWAP (CS / CSG selected by insn->data).
   Non-atomic emulation: load, compare, conditionally store. */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value). */

    /* Load the memory into the (temporary) output. While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions. */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal? Note that this setcond
       produces the output CC value, thus the NE sense of the test. */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality. */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op. Wait until after the store so that any
       exception gets the old cc_op value. */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (128-bit, CDSG).
   Non-atomic emulation: load both doublewords, compare as a pair,
   conditionally store, then commit register state after all possible
   exceptions have been raised. */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic. */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* On equality (CC==0) store the new pair, else re-store the old. */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions. */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; delegated to helper. */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif

/* CONVERT TO DECIMAL: convert the 32-bit value in in1 to packed decimal
   via helper and store the 8-byte result at the in2 address. */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* COMPARE AND TRAP: if the m3 relation holds (unsigned when insn->data
   is set), record DXC 0xff in the FPC and raise a data exception;
   otherwise fall through.  The brcond uses the inverted condition to
   branch AROUND the trap. */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap. */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypercall-style interface; the function code is
   taken from the displacement field. */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
/* Division ops.  The integer helpers produce quotient and remainder
   (one returned via low128); the BFP ops divide in1 by in2. */

static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64 unsigned divide: dividend is the out:out2 pair. */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Extended BFP divide; low result half via low128. */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS REGISTER: read access register r2 from env. */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT CACHE ATTRIBUTE: we model no cache, so report -1. */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

/* EXTRACT FPC: read the floating-point control register. */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: high half of the PSW mask into r1, low half into r2
   (unless r2 is 0). */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2. Thus we cannot defer these writes to an output hook. */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: run the target instruction with the modified byte, entirely
   in the helper. */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register. We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction. This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway. So
       perhaps this doesn't bear thinking about any further. */

    TCGv_i64 tmp;

    /* The helper may raise exceptions; make PSW and cc_op current first. */
    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 when input is zero),
   R1+1 = input with the found bit cleared. */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64. It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
       value by 64, which is undefined. But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2128 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2130 int m3 = get_field(s->fields, m3);
2131 int pos, len, base = s->insn->data;
2132 TCGv_i64 tmp = tcg_temp_new_i64();
2133 uint64_t ccm;
2135 switch (m3) {
2136 case 0xf:
2137 /* Effectively a 32-bit load. */
2138 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2139 len = 32;
2140 goto one_insert;
2142 case 0xc:
2143 case 0x6:
2144 case 0x3:
2145 /* Effectively a 16-bit load. */
2146 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2147 len = 16;
2148 goto one_insert;
2150 case 0x8:
2151 case 0x4:
2152 case 0x2:
2153 case 0x1:
2154 /* Effectively an 8-bit load. */
2155 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2156 len = 8;
2157 goto one_insert;
2159 one_insert:
2160 pos = base + ctz32(m3) * 8;
2161 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2162 ccm = ((1ull << len) - 1) << pos;
2163 break;
2165 default:
2166 /* This is going to be a sequence of loads and inserts. */
2167 pos = base + 32 - 8;
2168 ccm = 0;
2169 while (m3) {
2170 if (m3 & 0x8) {
2171 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2172 tcg_gen_addi_i64(o->in2, o->in2, 1);
2173 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2174 ccm |= 0xff << pos;
2176 m3 = (m3 << 1) & 0xf;
2177 pos -= 8;
2179 break;
2182 tcg_gen_movi_i64(tmp, ccm);
2183 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2184 tcg_temp_free_i64(tmp);
2185 return NO_EXIT;
/* Insert in2 into a bit-field of in1; field shift/size come from the
   insn table (low byte = shift, next byte = size). */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

/* INSERT PROGRAM MASK: build bits 32-39 of r1 from the program mask
   (PSW bits) and the computed CC, leaving the other bits intact. */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Program mask: PSW bits 18-21 shifted into position. */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Condition code into bits 28-29 of the doubleword view. */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY: privileged; delegated to helper. */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED: privileged; delegated to helper. */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* BFP format conversions (LOAD LENGTHENED / LOAD ROUNDED), all via
   helpers; extended-format results return their low half via low128. */

static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS: mask to 31 bits. */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
/* Memory loads of the various widths/extensions, from the address in in2. */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* LOAD ON CONDITION: out = (m3 condition holds) ? in2 : in1, using
   movcond; the 32-bit comparison path widens the setcond result first. */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; helper loads control regs r1..r3. */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit). */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS: privileged address translation via helper; sets CC. */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* LOAD PSW (short, 64-bit format): two 32-bit loads; the 32-bit mask is
   widened into the 64-bit internal PSW mask before loading. */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (128-bit format): two 64-bit loads. */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3 from storage. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD MULTIPLE (32-bit): load successive words into the low halves of
   r1..r3, wrapping the register number modulo 16. */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE HIGH: as op_lm32 but into the high register halves. */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE (64-bit): full doubleword loads into r1..r3. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2491 #ifndef CONFIG_USER_ONLY
2492 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2494 check_privileged(s);
2495 potential_page_fault(s);
2496 gen_helper_lura(o->out, cpu_env, o->in2);
2497 return NO_EXIT;
2500 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2502 check_privileged(s);
2503 potential_page_fault(s);
2504 gen_helper_lurag(o->out, cpu_env, o->in2);
2505 return NO_EXIT;
2507 #endif
2509 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2511 o->out = o->in2;
2512 o->g_out = o->g_in2;
2513 TCGV_UNUSED_I64(o->in2);
2514 o->g_in2 = false;
2515 return NO_EXIT;
2518 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2520 o->out = o->in1;
2521 o->out2 = o->in2;
2522 o->g_out = o->g_in1;
2523 o->g_out2 = o->g_in2;
2524 TCGV_UNUSED_I64(o->in1);
2525 TCGV_UNUSED_I64(o->in2);
2526 o->g_in1 = o->g_in2 = false;
2527 return NO_EXIT;
2530 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2532 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2533 potential_page_fault(s);
2534 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2535 tcg_temp_free_i32(l);
2536 return NO_EXIT;
2539 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2541 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2542 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2543 potential_page_fault(s);
2544 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2545 tcg_temp_free_i32(r1);
2546 tcg_temp_free_i32(r2);
2547 set_cc_static(s);
2548 return NO_EXIT;
2551 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2553 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2554 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2555 potential_page_fault(s);
2556 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2557 tcg_temp_free_i32(r1);
2558 tcg_temp_free_i32(r3);
2559 set_cc_static(s);
2560 return NO_EXIT;
2563 #ifndef CONFIG_USER_ONLY
2564 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2566 int r1 = get_field(s->fields, l1);
2567 check_privileged(s);
2568 potential_page_fault(s);
2569 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2570 set_cc_static(s);
2571 return NO_EXIT;
2574 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2576 int r1 = get_field(s->fields, l1);
2577 check_privileged(s);
2578 potential_page_fault(s);
2579 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2580 set_cc_static(s);
2581 return NO_EXIT;
2583 #endif
2585 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2587 potential_page_fault(s);
2588 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2589 set_cc_static(s);
2590 return NO_EXIT;
2593 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2595 potential_page_fault(s);
2596 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2597 set_cc_static(s);
2598 return_low128(o->in2);
2599 return NO_EXIT;
2602 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2604 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2605 return NO_EXIT;
2608 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2610 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2611 return NO_EXIT;
2614 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2616 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2617 return NO_EXIT;
2620 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2622 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2623 return NO_EXIT;
2626 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2628 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2629 return NO_EXIT;
2632 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2634 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2635 return_low128(o->out2);
2636 return NO_EXIT;
2639 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2641 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2642 return_low128(o->out2);
2643 return NO_EXIT;
2646 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2648 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2649 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2650 tcg_temp_free_i64(r3);
2651 return NO_EXIT;
2654 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2656 int r3 = get_field(s->fields, r3);
2657 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2658 return NO_EXIT;
2661 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2663 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2664 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2665 tcg_temp_free_i64(r3);
2666 return NO_EXIT;
2669 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2671 int r3 = get_field(s->fields, r3);
2672 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2673 return NO_EXIT;
2676 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2678 gen_helper_nabs_i64(o->out, o->in2);
2679 return NO_EXIT;
2682 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2684 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2685 return NO_EXIT;
2688 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2690 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2691 return NO_EXIT;
2694 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2696 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2697 tcg_gen_mov_i64(o->out2, o->in2);
2698 return NO_EXIT;
2701 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2703 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2704 potential_page_fault(s);
2705 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2706 tcg_temp_free_i32(l);
2707 set_cc_static(s);
2708 return NO_EXIT;
2711 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2713 tcg_gen_neg_i64(o->out, o->in2);
2714 return NO_EXIT;
2717 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2719 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2720 return NO_EXIT;
2723 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2725 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2726 return NO_EXIT;
2729 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2731 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2732 tcg_gen_mov_i64(o->out2, o->in2);
2733 return NO_EXIT;
2736 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2738 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2739 potential_page_fault(s);
2740 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2741 tcg_temp_free_i32(l);
2742 set_cc_static(s);
2743 return NO_EXIT;
2746 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2748 tcg_gen_or_i64(o->out, o->in1, o->in2);
2749 return NO_EXIT;
2752 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2754 int shift = s->insn->data & 0xff;
2755 int size = s->insn->data >> 8;
2756 uint64_t mask = ((1ull << size) - 1) << shift;
2758 assert(!o->g_in2);
2759 tcg_gen_shli_i64(o->in2, o->in2, shift);
2760 tcg_gen_or_i64(o->out, o->in1, o->in2);
2762 /* Produce the CC from only the bits manipulated. */
2763 tcg_gen_andi_i64(cc_dst, o->out, mask);
2764 set_cc_nz_u64(s, cc_dst);
2765 return NO_EXIT;
2768 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2770 gen_helper_popcnt(o->out, o->in2);
2771 return NO_EXIT;
2774 #ifndef CONFIG_USER_ONLY
2775 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2777 check_privileged(s);
2778 gen_helper_ptlb(cpu_env);
2779 return NO_EXIT;
2781 #endif
2783 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2785 int i3 = get_field(s->fields, i3);
2786 int i4 = get_field(s->fields, i4);
2787 int i5 = get_field(s->fields, i5);
2788 int do_zero = i4 & 0x80;
2789 uint64_t mask, imask, pmask;
2790 int pos, len, rot;
2792 /* Adjust the arguments for the specific insn. */
2793 switch (s->fields->op2) {
2794 case 0x55: /* risbg */
2795 i3 &= 63;
2796 i4 &= 63;
2797 pmask = ~0;
2798 break;
2799 case 0x5d: /* risbhg */
2800 i3 &= 31;
2801 i4 &= 31;
2802 pmask = 0xffffffff00000000ull;
2803 break;
2804 case 0x51: /* risblg */
2805 i3 &= 31;
2806 i4 &= 31;
2807 pmask = 0x00000000ffffffffull;
2808 break;
2809 default:
2810 abort();
2813 /* MASK is the set of bits to be inserted from R2.
2814 Take care for I3/I4 wraparound. */
2815 mask = pmask >> i3;
2816 if (i3 <= i4) {
2817 mask ^= pmask >> i4 >> 1;
2818 } else {
2819 mask |= ~(pmask >> i4 >> 1);
2821 mask &= pmask;
2823 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2824 insns, we need to keep the other half of the register. */
2825 imask = ~mask | ~pmask;
2826 if (do_zero) {
2827 if (s->fields->op2 == 0x55) {
2828 imask = 0;
2829 } else {
2830 imask = ~pmask;
2834 /* In some cases we can implement this with deposit, which can be more
2835 efficient on some hosts. */
2836 if (~mask == imask && i3 <= i4) {
2837 if (s->fields->op2 == 0x5d) {
2838 i3 += 32, i4 += 32;
2840 /* Note that we rotate the bits to be inserted to the lsb, not to
2841 the position as described in the PoO. */
2842 len = i4 - i3 + 1;
2843 pos = 63 - i4;
2844 rot = (i5 - pos) & 63;
2845 } else {
2846 pos = len = -1;
2847 rot = i5 & 63;
2850 /* Rotate the input as necessary. */
2851 tcg_gen_rotli_i64(o->in2, o->in2, rot);
2853 /* Insert the selected bits into the output. */
2854 if (pos >= 0) {
2855 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2856 } else if (imask == 0) {
2857 tcg_gen_andi_i64(o->out, o->in2, mask);
2858 } else {
2859 tcg_gen_andi_i64(o->in2, o->in2, mask);
2860 tcg_gen_andi_i64(o->out, o->out, imask);
2861 tcg_gen_or_i64(o->out, o->out, o->in2);
2863 return NO_EXIT;
2866 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2868 int i3 = get_field(s->fields, i3);
2869 int i4 = get_field(s->fields, i4);
2870 int i5 = get_field(s->fields, i5);
2871 uint64_t mask;
2873 /* If this is a test-only form, arrange to discard the result. */
2874 if (i3 & 0x80) {
2875 o->out = tcg_temp_new_i64();
2876 o->g_out = false;
2879 i3 &= 63;
2880 i4 &= 63;
2881 i5 &= 63;
2883 /* MASK is the set of bits to be operated on from R2.
2884 Take care for I3/I4 wraparound. */
2885 mask = ~0ull >> i3;
2886 if (i3 <= i4) {
2887 mask ^= ~0ull >> i4 >> 1;
2888 } else {
2889 mask |= ~(~0ull >> i4 >> 1);
2892 /* Rotate the input as necessary. */
2893 tcg_gen_rotli_i64(o->in2, o->in2, i5);
2895 /* Operate. */
2896 switch (s->fields->op2) {
2897 case 0x55: /* AND */
2898 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2899 tcg_gen_and_i64(o->out, o->out, o->in2);
2900 break;
2901 case 0x56: /* OR */
2902 tcg_gen_andi_i64(o->in2, o->in2, mask);
2903 tcg_gen_or_i64(o->out, o->out, o->in2);
2904 break;
2905 case 0x57: /* XOR */
2906 tcg_gen_andi_i64(o->in2, o->in2, mask);
2907 tcg_gen_xor_i64(o->out, o->out, o->in2);
2908 break;
2909 default:
2910 abort();
2913 /* Set the CC. */
2914 tcg_gen_andi_i64(cc_dst, o->out, mask);
2915 set_cc_nz_u64(s, cc_dst);
2916 return NO_EXIT;
2919 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2921 tcg_gen_bswap16_i64(o->out, o->in2);
2922 return NO_EXIT;
2925 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2927 tcg_gen_bswap32_i64(o->out, o->in2);
2928 return NO_EXIT;
2931 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2933 tcg_gen_bswap64_i64(o->out, o->in2);
2934 return NO_EXIT;
2937 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2939 TCGv_i32 t1 = tcg_temp_new_i32();
2940 TCGv_i32 t2 = tcg_temp_new_i32();
2941 TCGv_i32 to = tcg_temp_new_i32();
2942 tcg_gen_trunc_i64_i32(t1, o->in1);
2943 tcg_gen_trunc_i64_i32(t2, o->in2);
2944 tcg_gen_rotl_i32(to, t1, t2);
2945 tcg_gen_extu_i32_i64(o->out, to);
2946 tcg_temp_free_i32(t1);
2947 tcg_temp_free_i32(t2);
2948 tcg_temp_free_i32(to);
2949 return NO_EXIT;
2952 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2954 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2955 return NO_EXIT;
2958 #ifndef CONFIG_USER_ONLY
2959 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2961 check_privileged(s);
2962 gen_helper_rrbe(cc_op, cpu_env, o->in2);
2963 set_cc_static(s);
2964 return NO_EXIT;
2967 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2969 check_privileged(s);
2970 gen_helper_sacf(cpu_env, o->in2);
2971 /* Addressing mode has changed, so end the block. */
2972 return EXIT_PC_STALE;
2974 #endif
2976 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
2978 int sam = s->insn->data;
2979 TCGv_i64 tsam;
2980 uint64_t mask;
2982 switch (sam) {
2983 case 0:
2984 mask = 0xffffff;
2985 break;
2986 case 1:
2987 mask = 0x7fffffff;
2988 break;
2989 default:
2990 mask = -1;
2991 break;
2994 /* Bizarre but true, we check the address of the current insn for the
2995 specification exception, not the next to be executed. Thus the PoO
2996 documents that Bad Things Happen two bytes before the end. */
2997 if (s->pc & ~mask) {
2998 gen_program_exception(s, PGM_SPECIFICATION);
2999 return EXIT_NORETURN;
3001 s->next_pc &= mask;
3003 tsam = tcg_const_i64(sam);
3004 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3005 tcg_temp_free_i64(tsam);
3007 /* Always exit the TB, since we (may have) changed execution mode. */
3008 return EXIT_PC_STALE;
3011 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3013 int r1 = get_field(s->fields, r1);
3014 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3015 return NO_EXIT;
3018 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3020 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3021 return NO_EXIT;
3024 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3026 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3027 return NO_EXIT;
3030 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3032 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3033 return_low128(o->out2);
3034 return NO_EXIT;
3037 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3039 gen_helper_sqeb(o->out, cpu_env, o->in2);
3040 return NO_EXIT;
3043 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3045 gen_helper_sqdb(o->out, cpu_env, o->in2);
3046 return NO_EXIT;
3049 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3051 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3052 return_low128(o->out2);
3053 return NO_EXIT;
3056 #ifndef CONFIG_USER_ONLY
3057 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3059 check_privileged(s);
3060 potential_page_fault(s);
3061 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3062 set_cc_static(s);
3063 return NO_EXIT;
3066 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3068 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3069 check_privileged(s);
3070 potential_page_fault(s);
3071 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3072 tcg_temp_free_i32(r1);
3073 return NO_EXIT;
3075 #endif
3077 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3079 DisasCompare c;
3080 TCGv_i64 a;
3081 TCGLabel *lab;
3082 int r1;
3084 disas_jcc(s, &c, get_field(s->fields, m3));
3086 /* We want to store when the condition is fulfilled, so branch
3087 out when it's not */
3088 c.cond = tcg_invert_cond(c.cond);
3090 lab = gen_new_label();
3091 if (c.is_64) {
3092 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3093 } else {
3094 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3096 free_compare(&c);
3098 r1 = get_field(s->fields, r1);
3099 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3100 if (s->insn->data) {
3101 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3102 } else {
3103 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3105 tcg_temp_free_i64(a);
3107 gen_set_label(lab);
3108 return NO_EXIT;
3111 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3113 uint64_t sign = 1ull << s->insn->data;
3114 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3115 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3116 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3117 /* The arithmetic left shift is curious in that it does not affect
3118 the sign bit. Copy that over from the source unchanged. */
3119 tcg_gen_andi_i64(o->out, o->out, ~sign);
3120 tcg_gen_andi_i64(o->in1, o->in1, sign);
3121 tcg_gen_or_i64(o->out, o->out, o->in1);
3122 return NO_EXIT;
3125 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3127 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3128 return NO_EXIT;
3131 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3133 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3134 return NO_EXIT;
3137 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3139 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3140 return NO_EXIT;
3143 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3145 gen_helper_sfpc(cpu_env, o->in2);
3146 return NO_EXIT;
3149 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3151 gen_helper_sfas(cpu_env, o->in2);
3152 return NO_EXIT;
3155 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3157 int b2 = get_field(s->fields, b2);
3158 int d2 = get_field(s->fields, d2);
3159 TCGv_i64 t1 = tcg_temp_new_i64();
3160 TCGv_i64 t2 = tcg_temp_new_i64();
3161 int mask, pos, len;
3163 switch (s->fields->op2) {
3164 case 0x99: /* SRNM */
3165 pos = 0, len = 2;
3166 break;
3167 case 0xb8: /* SRNMB */
3168 pos = 0, len = 3;
3169 break;
3170 case 0xb9: /* SRNMT */
3171 pos = 4, len = 3;
3172 break;
3173 default:
3174 tcg_abort();
3176 mask = (1 << len) - 1;
3178 /* Insert the value into the appropriate field of the FPC. */
3179 if (b2 == 0) {
3180 tcg_gen_movi_i64(t1, d2 & mask);
3181 } else {
3182 tcg_gen_addi_i64(t1, regs[b2], d2);
3183 tcg_gen_andi_i64(t1, t1, mask);
3185 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3186 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3187 tcg_temp_free_i64(t1);
3189 /* Then install the new FPC to set the rounding mode in fpu_status. */
3190 gen_helper_sfpc(cpu_env, t2);
3191 tcg_temp_free_i64(t2);
3192 return NO_EXIT;
3195 #ifndef CONFIG_USER_ONLY
3196 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3198 check_privileged(s);
3199 tcg_gen_shri_i64(o->in2, o->in2, 4);
3200 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3201 return NO_EXIT;
3204 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3206 check_privileged(s);
3207 gen_helper_sske(cpu_env, o->in1, o->in2);
3208 return NO_EXIT;
3211 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3213 check_privileged(s);
3214 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3215 return NO_EXIT;
3218 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3220 check_privileged(s);
3221 /* ??? Surely cpu address != cpu number. In any case the previous
3222 version of this stored more than the required half-word, so it
3223 is unlikely this has ever been tested. */
3224 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3225 return NO_EXIT;
3228 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3230 gen_helper_stck(o->out, cpu_env);
3231 /* ??? We don't implement clock states. */
3232 gen_op_movi_cc(s, 0);
3233 return NO_EXIT;
3236 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3238 TCGv_i64 c1 = tcg_temp_new_i64();
3239 TCGv_i64 c2 = tcg_temp_new_i64();
3240 gen_helper_stck(c1, cpu_env);
3241 /* Shift the 64-bit value into its place as a zero-extended
3242 104-bit value. Note that "bit positions 64-103 are always
3243 non-zero so that they compare differently to STCK"; we set
3244 the least significant bit to 1. */
3245 tcg_gen_shli_i64(c2, c1, 56);
3246 tcg_gen_shri_i64(c1, c1, 8);
3247 tcg_gen_ori_i64(c2, c2, 0x10000);
3248 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3249 tcg_gen_addi_i64(o->in2, o->in2, 8);
3250 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3251 tcg_temp_free_i64(c1);
3252 tcg_temp_free_i64(c2);
3253 /* ??? We don't implement clock states. */
3254 gen_op_movi_cc(s, 0);
3255 return NO_EXIT;
3258 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3260 check_privileged(s);
3261 gen_helper_sckc(cpu_env, o->in2);
3262 return NO_EXIT;
3265 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3267 check_privileged(s);
3268 gen_helper_stckc(o->out, cpu_env);
3269 return NO_EXIT;
3272 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3274 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3275 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3276 check_privileged(s);
3277 potential_page_fault(s);
3278 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3279 tcg_temp_free_i32(r1);
3280 tcg_temp_free_i32(r3);
3281 return NO_EXIT;
3284 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3286 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3287 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3288 check_privileged(s);
3289 potential_page_fault(s);
3290 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3291 tcg_temp_free_i32(r1);
3292 tcg_temp_free_i32(r3);
3293 return NO_EXIT;
3296 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3298 TCGv_i64 t1 = tcg_temp_new_i64();
3300 check_privileged(s);
3301 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3302 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3303 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3304 tcg_temp_free_i64(t1);
3306 return NO_EXIT;
3309 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3311 check_privileged(s);
3312 gen_helper_spt(cpu_env, o->in2);
3313 return NO_EXIT;
3316 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3318 TCGv_i64 f, a;
3319 /* We really ought to have more complete indication of facilities
3320 that we implement. Address this when STFLE is implemented. */
3321 check_privileged(s);
3322 f = tcg_const_i64(0xc0000000);
3323 a = tcg_const_i64(200);
3324 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3325 tcg_temp_free_i64(f);
3326 tcg_temp_free_i64(a);
3327 return NO_EXIT;
3330 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3332 check_privileged(s);
3333 gen_helper_stpt(o->out, cpu_env);
3334 return NO_EXIT;
3337 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3339 check_privileged(s);
3340 potential_page_fault(s);
3341 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3342 set_cc_static(s);
3343 return NO_EXIT;
3346 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3348 check_privileged(s);
3349 gen_helper_spx(cpu_env, o->in2);
3350 return NO_EXIT;
3353 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3355 check_privileged(s);
3356 /* Not operational. */
3357 gen_op_movi_cc(s, 3);
3358 return NO_EXIT;
3361 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3363 check_privileged(s);
3364 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3365 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3366 return NO_EXIT;
3369 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3371 uint64_t i2 = get_field(s->fields, i2);
3372 TCGv_i64 t;
3374 check_privileged(s);
3376 /* It is important to do what the instruction name says: STORE THEN.
3377 If we let the output hook perform the store then if we fault and
3378 restart, we'll have the wrong SYSTEM MASK in place. */
3379 t = tcg_temp_new_i64();
3380 tcg_gen_shri_i64(t, psw_mask, 56);
3381 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3382 tcg_temp_free_i64(t);
3384 if (s->fields->op == 0xac) {
3385 tcg_gen_andi_i64(psw_mask, psw_mask,
3386 (i2 << 56) | 0x00ffffffffffffffull);
3387 } else {
3388 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3390 return NO_EXIT;
3393 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3395 check_privileged(s);
3396 potential_page_fault(s);
3397 gen_helper_stura(cpu_env, o->in2, o->in1);
3398 return NO_EXIT;
3401 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3403 check_privileged(s);
3404 potential_page_fault(s);
3405 gen_helper_sturg(cpu_env, o->in2, o->in1);
3406 return NO_EXIT;
3408 #endif
3410 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3412 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3413 return NO_EXIT;
3416 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3418 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3419 return NO_EXIT;
3422 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3424 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3425 return NO_EXIT;
3428 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3430 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3431 return NO_EXIT;
3434 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3436 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3437 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3438 potential_page_fault(s);
3439 gen_helper_stam(cpu_env, r1, o->in2, r3);
3440 tcg_temp_free_i32(r1);
3441 tcg_temp_free_i32(r3);
3442 return NO_EXIT;
3445 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3447 int m3 = get_field(s->fields, m3);
3448 int pos, base = s->insn->data;
3449 TCGv_i64 tmp = tcg_temp_new_i64();
3451 pos = base + ctz32(m3) * 8;
3452 switch (m3) {
3453 case 0xf:
3454 /* Effectively a 32-bit store. */
3455 tcg_gen_shri_i64(tmp, o->in1, pos);
3456 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3457 break;
3459 case 0xc:
3460 case 0x6:
3461 case 0x3:
3462 /* Effectively a 16-bit store. */
3463 tcg_gen_shri_i64(tmp, o->in1, pos);
3464 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3465 break;
3467 case 0x8:
3468 case 0x4:
3469 case 0x2:
3470 case 0x1:
3471 /* Effectively an 8-bit store. */
3472 tcg_gen_shri_i64(tmp, o->in1, pos);
3473 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3474 break;
3476 default:
3477 /* This is going to be a sequence of shifts and stores. */
3478 pos = base + 32 - 8;
3479 while (m3) {
3480 if (m3 & 0x8) {
3481 tcg_gen_shri_i64(tmp, o->in1, pos);
3482 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3483 tcg_gen_addi_i64(o->in2, o->in2, 1);
3485 m3 = (m3 << 1) & 0xf;
3486 pos -= 8;
3488 break;
3490 tcg_temp_free_i64(tmp);
3491 return NO_EXIT;
3494 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3496 int r1 = get_field(s->fields, r1);
3497 int r3 = get_field(s->fields, r3);
3498 int size = s->insn->data;
3499 TCGv_i64 tsize = tcg_const_i64(size);
3501 while (1) {
3502 if (size == 8) {
3503 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3504 } else {
3505 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3507 if (r1 == r3) {
3508 break;
3510 tcg_gen_add_i64(o->in2, o->in2, tsize);
3511 r1 = (r1 + 1) & 15;
3514 tcg_temp_free_i64(tsize);
3515 return NO_EXIT;
3518 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3520 int r1 = get_field(s->fields, r1);
3521 int r3 = get_field(s->fields, r3);
3522 TCGv_i64 t = tcg_temp_new_i64();
3523 TCGv_i64 t4 = tcg_const_i64(4);
3524 TCGv_i64 t32 = tcg_const_i64(32);
3526 while (1) {
3527 tcg_gen_shl_i64(t, regs[r1], t32);
3528 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3529 if (r1 == r3) {
3530 break;
3532 tcg_gen_add_i64(o->in2, o->in2, t4);
3533 r1 = (r1 + 1) & 15;
3536 tcg_temp_free_i64(t);
3537 tcg_temp_free_i64(t4);
3538 tcg_temp_free_i64(t32);
3539 return NO_EXIT;
3542 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3544 potential_page_fault(s);
3545 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3546 set_cc_static(s);
3547 return_low128(o->in2);
3548 return NO_EXIT;
3551 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3553 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3554 return NO_EXIT;
3557 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3559 DisasCompare cmp;
3560 TCGv_i64 borrow;
3562 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3564 /* The !borrow flag is the msb of CC. Since we want the inverse of
3565 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3566 disas_jcc(s, &cmp, 8 | 4);
3567 borrow = tcg_temp_new_i64();
3568 if (cmp.is_64) {
3569 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3570 } else {
3571 TCGv_i32 t = tcg_temp_new_i32();
3572 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3573 tcg_gen_extu_i32_i64(borrow, t);
3574 tcg_temp_free_i32(t);
3576 free_compare(&cmp);
3578 tcg_gen_sub_i64(o->out, o->out, borrow);
3579 tcg_temp_free_i64(borrow);
3580 return NO_EXIT;
3583 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3585 TCGv_i32 t;
3587 update_psw_addr(s);
3588 update_cc_op(s);
3590 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3591 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3592 tcg_temp_free_i32(t);
3594 t = tcg_const_i32(s->next_pc - s->pc);
3595 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3596 tcg_temp_free_i32(t);
3598 gen_exception(EXCP_SVC);
3599 return EXIT_NORETURN;
3602 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3604 gen_helper_tceb(cc_op, o->in1, o->in2);
3605 set_cc_static(s);
3606 return NO_EXIT;
3609 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3611 gen_helper_tcdb(cc_op, o->in1, o->in2);
3612 set_cc_static(s);
3613 return NO_EXIT;
3616 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3618 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3619 set_cc_static(s);
3620 return NO_EXIT;
3623 #ifndef CONFIG_USER_ONLY
3624 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3626 potential_page_fault(s);
3627 gen_helper_tprot(cc_op, o->addr1, o->in2);
3628 set_cc_static(s);
3629 return NO_EXIT;
3631 #endif
3633 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3635 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3636 potential_page_fault(s);
3637 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3638 tcg_temp_free_i32(l);
3639 set_cc_static(s);
3640 return NO_EXIT;
3643 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3645 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3646 potential_page_fault(s);
3647 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3648 tcg_temp_free_i32(l);
3649 return NO_EXIT;
3652 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3654 int d1 = get_field(s->fields, d1);
3655 int d2 = get_field(s->fields, d2);
3656 int b1 = get_field(s->fields, b1);
3657 int b2 = get_field(s->fields, b2);
3658 int l = get_field(s->fields, l1);
3659 TCGv_i32 t32;
3661 o->addr1 = get_address(s, 0, b1, d1);
3663 /* If the addresses are identical, this is a store/memset of zero. */
3664 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3665 o->in2 = tcg_const_i64(0);
3667 l++;
3668 while (l >= 8) {
3669 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3670 l -= 8;
3671 if (l > 0) {
3672 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3675 if (l >= 4) {
3676 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3677 l -= 4;
3678 if (l > 0) {
3679 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3682 if (l >= 2) {
3683 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3684 l -= 2;
3685 if (l > 0) {
3686 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3689 if (l) {
3690 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
3692 gen_op_movi_cc(s, 0);
3693 return NO_EXIT;
3696 /* But in general we'll defer to a helper. */
3697 o->in2 = get_address(s, 0, b2, d2);
3698 t32 = tcg_const_i32(l);
3699 potential_page_fault(s);
3700 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3701 tcg_temp_free_i32(t32);
3702 set_cc_static(s);
3703 return NO_EXIT;
3706 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3708 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3709 return NO_EXIT;
3712 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3714 int shift = s->insn->data & 0xff;
3715 int size = s->insn->data >> 8;
3716 uint64_t mask = ((1ull << size) - 1) << shift;
3718 assert(!o->g_in2);
3719 tcg_gen_shli_i64(o->in2, o->in2, shift);
3720 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3722 /* Produce the CC from only the bits manipulated. */
3723 tcg_gen_andi_i64(cc_dst, o->out, mask);
3724 set_cc_nz_u64(s, cc_dst);
3725 return NO_EXIT;
3728 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3730 o->out = tcg_const_i64(0);
3731 return NO_EXIT;
3734 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3736 o->out = tcg_const_i64(0);
3737 o->out2 = o->out;
3738 o->g_out2 = true;
3739 return NO_EXIT;
3742 /* ====================================================================== */
3743 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3744 the original inputs), update the various cc data structures in order to
3745 be able to compute the new condition code. */
3747 static void cout_abs32(DisasContext *s, DisasOps *o)
3749 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3752 static void cout_abs64(DisasContext *s, DisasOps *o)
3754 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3757 static void cout_adds32(DisasContext *s, DisasOps *o)
3759 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3762 static void cout_adds64(DisasContext *s, DisasOps *o)
3764 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3767 static void cout_addu32(DisasContext *s, DisasOps *o)
3769 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3772 static void cout_addu64(DisasContext *s, DisasOps *o)
3774 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3777 static void cout_addc32(DisasContext *s, DisasOps *o)
3779 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3782 static void cout_addc64(DisasContext *s, DisasOps *o)
3784 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3787 static void cout_cmps32(DisasContext *s, DisasOps *o)
3789 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3792 static void cout_cmps64(DisasContext *s, DisasOps *o)
3794 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3797 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3799 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3802 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3804 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3807 static void cout_f32(DisasContext *s, DisasOps *o)
3809 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3812 static void cout_f64(DisasContext *s, DisasOps *o)
3814 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3817 static void cout_f128(DisasContext *s, DisasOps *o)
3819 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3822 static void cout_nabs32(DisasContext *s, DisasOps *o)
3824 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3827 static void cout_nabs64(DisasContext *s, DisasOps *o)
3829 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3832 static void cout_neg32(DisasContext *s, DisasOps *o)
3834 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3837 static void cout_neg64(DisasContext *s, DisasOps *o)
3839 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3842 static void cout_nz32(DisasContext *s, DisasOps *o)
3844 tcg_gen_ext32u_i64(cc_dst, o->out);
3845 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3848 static void cout_nz64(DisasContext *s, DisasOps *o)
3850 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3853 static void cout_s32(DisasContext *s, DisasOps *o)
3855 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3858 static void cout_s64(DisasContext *s, DisasOps *o)
3860 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3863 static void cout_subs32(DisasContext *s, DisasOps *o)
3865 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3868 static void cout_subs64(DisasContext *s, DisasOps *o)
3870 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3873 static void cout_subu32(DisasContext *s, DisasOps *o)
3875 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3878 static void cout_subu64(DisasContext *s, DisasOps *o)
3880 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3883 static void cout_subb32(DisasContext *s, DisasOps *o)
3885 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3888 static void cout_subb64(DisasContext *s, DisasOps *o)
3890 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3893 static void cout_tm32(DisasContext *s, DisasOps *o)
3895 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3898 static void cout_tm64(DisasContext *s, DisasOps *o)
3900 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3903 /* ====================================================================== */
3904 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3905 with the TCG register to which we will write. Used in combination with
3906 the "wout" generators, in some cases we need a new temporary, and in
3907 some cases we can write to a TCG global. */
3909 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3911 o->out = tcg_temp_new_i64();
3913 #define SPEC_prep_new 0
3915 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3917 o->out = tcg_temp_new_i64();
3918 o->out2 = tcg_temp_new_i64();
3920 #define SPEC_prep_new_P 0
3922 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3924 o->out = regs[get_field(f, r1)];
3925 o->g_out = true;
3927 #define SPEC_prep_r1 0
3929 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3931 int r1 = get_field(f, r1);
3932 o->out = regs[r1];
3933 o->out2 = regs[r1 + 1];
3934 o->g_out = o->g_out2 = true;
3936 #define SPEC_prep_r1_P SPEC_r1_even
3938 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3940 o->out = fregs[get_field(f, r1)];
3941 o->g_out = true;
3943 #define SPEC_prep_f1 0
3945 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3947 int r1 = get_field(f, r1);
3948 o->out = fregs[r1];
3949 o->out2 = fregs[r1 + 2];
3950 o->g_out = o->g_out2 = true;
3952 #define SPEC_prep_x1 SPEC_r1_f128
3954 /* ====================================================================== */
3955 /* The "Write OUTput" generators. These generally perform some non-trivial
3956 copy of data to TCG globals, or to main memory. The trivial cases are
3957 generally handled by having a "prep" generator install the TCG global
3958 as the destination of the operation. */
3960 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3962 store_reg(get_field(f, r1), o->out);
3964 #define SPEC_wout_r1 0
3966 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3968 int r1 = get_field(f, r1);
3969 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3971 #define SPEC_wout_r1_8 0
3973 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3975 int r1 = get_field(f, r1);
3976 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3978 #define SPEC_wout_r1_16 0
3980 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3982 store_reg32_i64(get_field(f, r1), o->out);
3984 #define SPEC_wout_r1_32 0
3986 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3988 int r1 = get_field(f, r1);
3989 store_reg32_i64(r1, o->out);
3990 store_reg32_i64(r1 + 1, o->out2);
3992 #define SPEC_wout_r1_P32 SPEC_r1_even
3994 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3996 int r1 = get_field(f, r1);
3997 store_reg32_i64(r1 + 1, o->out);
3998 tcg_gen_shri_i64(o->out, o->out, 32);
3999 store_reg32_i64(r1, o->out);
4001 #define SPEC_wout_r1_D32 SPEC_r1_even
4003 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4005 store_freg32_i64(get_field(f, r1), o->out);
4007 #define SPEC_wout_e1 0
4009 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4011 store_freg(get_field(f, r1), o->out);
4013 #define SPEC_wout_f1 0
4015 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4017 int f1 = get_field(s->fields, r1);
4018 store_freg(f1, o->out);
4019 store_freg(f1 + 2, o->out2);
4021 #define SPEC_wout_x1 SPEC_r1_f128
4023 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4025 if (get_field(f, r1) != get_field(f, r2)) {
4026 store_reg32_i64(get_field(f, r1), o->out);
4029 #define SPEC_wout_cond_r1r2_32 0
4031 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4033 if (get_field(f, r1) != get_field(f, r2)) {
4034 store_freg32_i64(get_field(f, r1), o->out);
4037 #define SPEC_wout_cond_e1e2 0
4039 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4041 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4043 #define SPEC_wout_m1_8 0
4045 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4047 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4049 #define SPEC_wout_m1_16 0
4051 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4053 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4055 #define SPEC_wout_m1_32 0
4057 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4059 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4061 #define SPEC_wout_m1_64 0
4063 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4065 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4067 #define SPEC_wout_m2_32 0
4069 static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4071 /* XXX release reservation */
4072 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4073 store_reg32_i64(get_field(f, r1), o->in2);
4075 #define SPEC_wout_m2_32_r1_atomic 0
4077 static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4079 /* XXX release reservation */
4080 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4081 store_reg(get_field(f, r1), o->in2);
4083 #define SPEC_wout_m2_64_r1_atomic 0
4085 /* ====================================================================== */
4086 /* The "INput 1" generators. These load the first operand to an insn. */
4088 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4090 o->in1 = load_reg(get_field(f, r1));
4092 #define SPEC_in1_r1 0
4094 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4096 o->in1 = regs[get_field(f, r1)];
4097 o->g_in1 = true;
4099 #define SPEC_in1_r1_o 0
4101 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4103 o->in1 = tcg_temp_new_i64();
4104 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4106 #define SPEC_in1_r1_32s 0
4108 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4110 o->in1 = tcg_temp_new_i64();
4111 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4113 #define SPEC_in1_r1_32u 0
4115 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4117 o->in1 = tcg_temp_new_i64();
4118 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4120 #define SPEC_in1_r1_sr32 0
4122 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4124 o->in1 = load_reg(get_field(f, r1) + 1);
4126 #define SPEC_in1_r1p1 SPEC_r1_even
4128 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4130 o->in1 = tcg_temp_new_i64();
4131 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4133 #define SPEC_in1_r1p1_32s SPEC_r1_even
4135 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4137 o->in1 = tcg_temp_new_i64();
4138 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4140 #define SPEC_in1_r1p1_32u SPEC_r1_even
4142 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4144 int r1 = get_field(f, r1);
4145 o->in1 = tcg_temp_new_i64();
4146 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4148 #define SPEC_in1_r1_D32 SPEC_r1_even
4150 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4152 o->in1 = load_reg(get_field(f, r2));
4154 #define SPEC_in1_r2 0
4156 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4158 o->in1 = load_reg(get_field(f, r3));
4160 #define SPEC_in1_r3 0
4162 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4164 o->in1 = regs[get_field(f, r3)];
4165 o->g_in1 = true;
4167 #define SPEC_in1_r3_o 0
4169 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4171 o->in1 = tcg_temp_new_i64();
4172 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4174 #define SPEC_in1_r3_32s 0
4176 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4178 o->in1 = tcg_temp_new_i64();
4179 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4181 #define SPEC_in1_r3_32u 0
4183 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4185 int r3 = get_field(f, r3);
4186 o->in1 = tcg_temp_new_i64();
4187 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4189 #define SPEC_in1_r3_D32 SPEC_r3_even
4191 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4193 o->in1 = load_freg32_i64(get_field(f, r1));
4195 #define SPEC_in1_e1 0
4197 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4199 o->in1 = fregs[get_field(f, r1)];
4200 o->g_in1 = true;
4202 #define SPEC_in1_f1_o 0
4204 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4206 int r1 = get_field(f, r1);
4207 o->out = fregs[r1];
4208 o->out2 = fregs[r1 + 2];
4209 o->g_out = o->g_out2 = true;
4211 #define SPEC_in1_x1_o SPEC_r1_f128
4213 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4215 o->in1 = fregs[get_field(f, r3)];
4216 o->g_in1 = true;
4218 #define SPEC_in1_f3_o 0
4220 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4222 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4224 #define SPEC_in1_la1 0
4226 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4228 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4229 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4231 #define SPEC_in1_la2 0
4233 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4235 in1_la1(s, f, o);
4236 o->in1 = tcg_temp_new_i64();
4237 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4239 #define SPEC_in1_m1_8u 0
4241 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4243 in1_la1(s, f, o);
4244 o->in1 = tcg_temp_new_i64();
4245 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4247 #define SPEC_in1_m1_16s 0
4249 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4251 in1_la1(s, f, o);
4252 o->in1 = tcg_temp_new_i64();
4253 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4255 #define SPEC_in1_m1_16u 0
4257 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4259 in1_la1(s, f, o);
4260 o->in1 = tcg_temp_new_i64();
4261 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4263 #define SPEC_in1_m1_32s 0
4265 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4267 in1_la1(s, f, o);
4268 o->in1 = tcg_temp_new_i64();
4269 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4271 #define SPEC_in1_m1_32u 0
4273 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4275 in1_la1(s, f, o);
4276 o->in1 = tcg_temp_new_i64();
4277 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4279 #define SPEC_in1_m1_64 0
4281 /* ====================================================================== */
4282 /* The "INput 2" generators. These load the second operand to an insn. */
4284 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4286 o->in2 = regs[get_field(f, r1)];
4287 o->g_in2 = true;
4289 #define SPEC_in2_r1_o 0
4291 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4293 o->in2 = tcg_temp_new_i64();
4294 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4296 #define SPEC_in2_r1_16u 0
4298 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4300 o->in2 = tcg_temp_new_i64();
4301 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4303 #define SPEC_in2_r1_32u 0
4305 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4307 int r1 = get_field(f, r1);
4308 o->in2 = tcg_temp_new_i64();
4309 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4311 #define SPEC_in2_r1_D32 SPEC_r1_even
4313 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4315 o->in2 = load_reg(get_field(f, r2));
4317 #define SPEC_in2_r2 0
4319 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4321 o->in2 = regs[get_field(f, r2)];
4322 o->g_in2 = true;
4324 #define SPEC_in2_r2_o 0
4326 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4328 int r2 = get_field(f, r2);
4329 if (r2 != 0) {
4330 o->in2 = load_reg(r2);
4333 #define SPEC_in2_r2_nz 0
4335 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4337 o->in2 = tcg_temp_new_i64();
4338 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4340 #define SPEC_in2_r2_8s 0
4342 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4344 o->in2 = tcg_temp_new_i64();
4345 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4347 #define SPEC_in2_r2_8u 0
4349 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4351 o->in2 = tcg_temp_new_i64();
4352 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4354 #define SPEC_in2_r2_16s 0
4356 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4358 o->in2 = tcg_temp_new_i64();
4359 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4361 #define SPEC_in2_r2_16u 0
4363 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4365 o->in2 = load_reg(get_field(f, r3));
4367 #define SPEC_in2_r3 0
4369 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4371 o->in2 = tcg_temp_new_i64();
4372 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4374 #define SPEC_in2_r2_32s 0
4376 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4378 o->in2 = tcg_temp_new_i64();
4379 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4381 #define SPEC_in2_r2_32u 0
4383 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4385 o->in2 = load_freg32_i64(get_field(f, r2));
4387 #define SPEC_in2_e2 0
4389 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4391 o->in2 = fregs[get_field(f, r2)];
4392 o->g_in2 = true;
4394 #define SPEC_in2_f2_o 0
4396 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4398 int r2 = get_field(f, r2);
4399 o->in1 = fregs[r2];
4400 o->in2 = fregs[r2 + 2];
4401 o->g_in1 = o->g_in2 = true;
4403 #define SPEC_in2_x2_o SPEC_r2_f128
4405 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4407 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4409 #define SPEC_in2_ra2 0
4411 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4413 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4414 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4416 #define SPEC_in2_a2 0
4418 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4420 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4422 #define SPEC_in2_ri2 0
4424 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4426 help_l2_shift(s, f, o, 31);
4428 #define SPEC_in2_sh32 0
4430 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4432 help_l2_shift(s, f, o, 63);
4434 #define SPEC_in2_sh64 0
4436 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4438 in2_a2(s, f, o);
4439 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4441 #define SPEC_in2_m2_8u 0
4443 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4445 in2_a2(s, f, o);
4446 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4448 #define SPEC_in2_m2_16s 0
4450 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4452 in2_a2(s, f, o);
4453 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4455 #define SPEC_in2_m2_16u 0
4457 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4459 in2_a2(s, f, o);
4460 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4462 #define SPEC_in2_m2_32s 0
4464 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4466 in2_a2(s, f, o);
4467 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4469 #define SPEC_in2_m2_32u 0
4471 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4473 in2_a2(s, f, o);
4474 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4476 #define SPEC_in2_m2_64 0
4478 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4480 in2_ri2(s, f, o);
4481 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4483 #define SPEC_in2_mri2_16u 0
4485 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4487 in2_ri2(s, f, o);
4488 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4490 #define SPEC_in2_mri2_32s 0
4492 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4494 in2_ri2(s, f, o);
4495 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4497 #define SPEC_in2_mri2_32u 0
4499 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4501 in2_ri2(s, f, o);
4502 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4504 #define SPEC_in2_mri2_64 0
4506 static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4508 /* XXX should reserve the address */
4509 in1_la2(s, f, o);
4510 o->in2 = tcg_temp_new_i64();
4511 tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4513 #define SPEC_in2_m2_32s_atomic 0
4515 static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4517 /* XXX should reserve the address */
4518 in1_la2(s, f, o);
4519 o->in2 = tcg_temp_new_i64();
4520 tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4522 #define SPEC_in2_m2_64_atomic 0
4524 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4526 o->in2 = tcg_const_i64(get_field(f, i2));
4528 #define SPEC_in2_i2 0
4530 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4532 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4534 #define SPEC_in2_i2_8u 0
4536 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4538 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4540 #define SPEC_in2_i2_16u 0
4542 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4544 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4546 #define SPEC_in2_i2_32u 0
4548 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4550 uint64_t i2 = (uint16_t)get_field(f, i2);
4551 o->in2 = tcg_const_i64(i2 << s->insn->data);
4553 #define SPEC_in2_i2_16u_shl 0
4555 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4557 uint64_t i2 = (uint32_t)get_field(f, i2);
4558 o->in2 = tcg_const_i64(i2 << s->insn->data);
4560 #define SPEC_in2_i2_32u_shl 0
4562 /* ====================================================================== */
4564 /* Find opc within the table of insns. This is formulated as a switch
4565 statement so that (1) we get compile-time notice of cut-paste errors
4566 for duplicated opcodes, and (2) the compiler generates the binary
4567 search tree, rather than us having to post-process the table. */
4569 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4570 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4572 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4574 enum DisasInsnEnum {
4575 #include "insn-data.def"
4578 #undef D
4579 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4580 .opc = OPC, \
4581 .fmt = FMT_##FT, \
4582 .fac = FAC_##FC, \
4583 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4584 .name = #NM, \
4585 .help_in1 = in1_##I1, \
4586 .help_in2 = in2_##I2, \
4587 .help_prep = prep_##P, \
4588 .help_wout = wout_##W, \
4589 .help_cout = cout_##CC, \
4590 .help_op = op_##OP, \
4591 .data = D \
4594 /* Allow 0 to be used for NULL in the table below. */
4595 #define in1_0 NULL
4596 #define in2_0 NULL
4597 #define prep_0 NULL
4598 #define wout_0 NULL
4599 #define cout_0 NULL
4600 #define op_0 NULL
4602 #define SPEC_in1_0 0
4603 #define SPEC_in2_0 0
4604 #define SPEC_prep_0 0
4605 #define SPEC_wout_0 0
4607 static const DisasInsn insn_info[] = {
4608 #include "insn-data.def"
4611 #undef D
4612 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4613 case OPC: return &insn_info[insn_ ## NM];
4615 static const DisasInsn *lookup_opc(uint16_t opc)
4617 switch (opc) {
4618 #include "insn-data.def"
4619 default:
4620 return NULL;
4624 #undef D
4625 #undef C
4627 /* Extract a field from the insn. The INSN should be left-aligned in
4628 the uint64_t so that we can more easily utilize the big-bit-endian
4629 definitions we extract from the Principals of Operation. */
4631 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4633 uint32_t r, m;
4635 if (f->size == 0) {
4636 return;
4639 /* Zero extract the field from the insn. */
4640 r = (insn << f->beg) >> (64 - f->size);
4642 /* Sign-extend, or un-swap the field as necessary. */
4643 switch (f->type) {
4644 case 0: /* unsigned */
4645 break;
4646 case 1: /* signed */
4647 assert(f->size <= 32);
4648 m = 1u << (f->size - 1);
4649 r = (r ^ m) - m;
4650 break;
4651 case 2: /* dl+dh split, signed 20 bit. */
4652 r = ((int8_t)r << 12) | (r >> 8);
4653 break;
4654 default:
4655 abort();
4658 /* Validate that the "compressed" encoding we selected above is valid.
4659 I.e. we havn't make two different original fields overlap. */
4660 assert(((o->presentC >> f->indexC) & 1) == 0);
4661 o->presentC |= 1 << f->indexC;
4662 o->presentO |= 1 << f->indexO;
4664 o->c[f->indexC] = r;
4667 /* Lookup the insn at the current PC, extracting the operands into O and
4668 returning the info struct for the insn. Returns NULL for invalid insn. */
4670 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4671 DisasFields *f)
4673 uint64_t insn, pc = s->pc;
4674 int op, op2, ilen;
4675 const DisasInsn *info;
4677 insn = ld_code2(env, pc);
4678 op = (insn >> 8) & 0xff;
4679 ilen = get_ilen(op);
4680 s->next_pc = s->pc + ilen;
4682 switch (ilen) {
4683 case 2:
4684 insn = insn << 48;
4685 break;
4686 case 4:
4687 insn = ld_code4(env, pc) << 32;
4688 break;
4689 case 6:
4690 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4691 break;
4692 default:
4693 abort();
4696 /* We can't actually determine the insn format until we've looked up
4697 the full insn opcode. Which we can't do without locating the
4698 secondary opcode. Assume by default that OP2 is at bit 40; for
4699 those smaller insns that don't actually have a secondary opcode
4700 this will correctly result in OP2 = 0. */
4701 switch (op) {
4702 case 0x01: /* E */
4703 case 0x80: /* S */
4704 case 0x82: /* S */
4705 case 0x93: /* S */
4706 case 0xb2: /* S, RRF, RRE */
4707 case 0xb3: /* RRE, RRD, RRF */
4708 case 0xb9: /* RRE, RRF */
4709 case 0xe5: /* SSE, SIL */
4710 op2 = (insn << 8) >> 56;
4711 break;
4712 case 0xa5: /* RI */
4713 case 0xa7: /* RI */
4714 case 0xc0: /* RIL */
4715 case 0xc2: /* RIL */
4716 case 0xc4: /* RIL */
4717 case 0xc6: /* RIL */
4718 case 0xc8: /* SSF */
4719 case 0xcc: /* RIL */
4720 op2 = (insn << 12) >> 60;
4721 break;
4722 case 0xd0 ... 0xdf: /* SS */
4723 case 0xe1: /* SS */
4724 case 0xe2: /* SS */
4725 case 0xe8: /* SS */
4726 case 0xe9: /* SS */
4727 case 0xea: /* SS */
4728 case 0xee ... 0xf3: /* SS */
4729 case 0xf8 ... 0xfd: /* SS */
4730 op2 = 0;
4731 break;
4732 default:
4733 op2 = (insn << 40) >> 56;
4734 break;
4737 memset(f, 0, sizeof(*f));
4738 f->op = op;
4739 f->op2 = op2;
4741 /* Lookup the instruction. */
4742 info = lookup_opc(op << 8 | op2);
4744 /* If we found it, extract the operands. */
4745 if (info != NULL) {
4746 DisasFormat fmt = info->fmt;
4747 int i;
4749 for (i = 0; i < NUM_C_FIELD; ++i) {
4750 extract_field(f, &format_info[fmt].op[i], insn);
4753 return info;
4756 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4758 const DisasInsn *insn;
4759 ExitStatus ret = NO_EXIT;
4760 DisasFields f;
4761 DisasOps o;
4763 /* Search for the insn in the table. */
4764 insn = extract_insn(env, s, &f);
4766 /* Not found means unimplemented/illegal opcode. */
4767 if (insn == NULL) {
4768 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
4769 f.op, f.op2);
4770 gen_illegal_opcode(s);
4771 return EXIT_NORETURN;
4774 /* Check for insn specification exceptions. */
4775 if (insn->spec) {
4776 int spec = insn->spec, excp = 0, r;
4778 if (spec & SPEC_r1_even) {
4779 r = get_field(&f, r1);
4780 if (r & 1) {
4781 excp = PGM_SPECIFICATION;
4784 if (spec & SPEC_r2_even) {
4785 r = get_field(&f, r2);
4786 if (r & 1) {
4787 excp = PGM_SPECIFICATION;
4790 if (spec & SPEC_r3_even) {
4791 r = get_field(&f, r3);
4792 if (r & 1) {
4793 excp = PGM_SPECIFICATION;
4796 if (spec & SPEC_r1_f128) {
4797 r = get_field(&f, r1);
4798 if (r > 13) {
4799 excp = PGM_SPECIFICATION;
4802 if (spec & SPEC_r2_f128) {
4803 r = get_field(&f, r2);
4804 if (r > 13) {
4805 excp = PGM_SPECIFICATION;
4808 if (excp) {
4809 gen_program_exception(s, excp);
4810 return EXIT_NORETURN;
4814 /* Set up the strutures we use to communicate with the helpers. */
4815 s->insn = insn;
4816 s->fields = &f;
4817 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4818 TCGV_UNUSED_I64(o.out);
4819 TCGV_UNUSED_I64(o.out2);
4820 TCGV_UNUSED_I64(o.in1);
4821 TCGV_UNUSED_I64(o.in2);
4822 TCGV_UNUSED_I64(o.addr1);
4824 /* Implement the instruction. */
4825 if (insn->help_in1) {
4826 insn->help_in1(s, &f, &o);
4828 if (insn->help_in2) {
4829 insn->help_in2(s, &f, &o);
4831 if (insn->help_prep) {
4832 insn->help_prep(s, &f, &o);
4834 if (insn->help_op) {
4835 ret = insn->help_op(s, &o);
4837 if (insn->help_wout) {
4838 insn->help_wout(s, &f, &o);
4840 if (insn->help_cout) {
4841 insn->help_cout(s, &o);
4844 /* Free any temporaries created by the helpers. */
4845 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4846 tcg_temp_free_i64(o.out);
4848 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4849 tcg_temp_free_i64(o.out2);
4851 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4852 tcg_temp_free_i64(o.in1);
4854 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4855 tcg_temp_free_i64(o.in2);
4857 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4858 tcg_temp_free_i64(o.addr1);
4861 /* Advance to the next instruction. */
4862 s->pc = s->next_pc;
4863 return ret;
4866 static inline void gen_intermediate_code_internal(S390CPU *cpu,
4867 TranslationBlock *tb,
4868 bool search_pc)
4870 CPUState *cs = CPU(cpu);
4871 CPUS390XState *env = &cpu->env;
4872 DisasContext dc;
4873 target_ulong pc_start;
4874 uint64_t next_page_start;
4875 int j, lj = -1;
4876 int num_insns, max_insns;
4877 CPUBreakpoint *bp;
4878 ExitStatus status;
4879 bool do_debug;
4881 pc_start = tb->pc;
4883 /* 31-bit mode */
4884 if (!(tb->flags & FLAG_MASK_64)) {
4885 pc_start &= 0x7fffffff;
4888 dc.tb = tb;
4889 dc.pc = pc_start;
4890 dc.cc_op = CC_OP_DYNAMIC;
4891 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
4893 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4895 num_insns = 0;
4896 max_insns = tb->cflags & CF_COUNT_MASK;
4897 if (max_insns == 0) {
4898 max_insns = CF_COUNT_MASK;
4901 gen_tb_start(tb);
4903 do {
4904 if (search_pc) {
4905 j = tcg_op_buf_count();
4906 if (lj < j) {
4907 lj++;
4908 while (lj < j) {
4909 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4912 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4913 gen_opc_cc_op[lj] = dc.cc_op;
4914 tcg_ctx.gen_opc_instr_start[lj] = 1;
4915 tcg_ctx.gen_opc_icount[lj] = num_insns;
4917 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4918 gen_io_start();
4921 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4922 tcg_gen_debug_insn_start(dc.pc);
4925 status = NO_EXIT;
4926 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
4927 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
4928 if (bp->pc == dc.pc) {
4929 status = EXIT_PC_STALE;
4930 do_debug = true;
4931 break;
4935 if (status == NO_EXIT) {
4936 status = translate_one(env, &dc);
4939 /* If we reach a page boundary, are single stepping,
4940 or exhaust instruction count, stop generation. */
4941 if (status == NO_EXIT
4942 && (dc.pc >= next_page_start
4943 || tcg_op_buf_full()
4944 || num_insns >= max_insns
4945 || singlestep
4946 || cs->singlestep_enabled)) {
4947 status = EXIT_PC_STALE;
4949 } while (status == NO_EXIT);
4951 if (tb->cflags & CF_LAST_IO) {
4952 gen_io_end();
4955 switch (status) {
4956 case EXIT_GOTO_TB:
4957 case EXIT_NORETURN:
4958 break;
4959 case EXIT_PC_STALE:
4960 update_psw_addr(&dc);
4961 /* FALLTHRU */
4962 case EXIT_PC_UPDATED:
4963 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4964 cc op type is in env */
4965 update_cc_op(&dc);
4966 /* Exit the TB, either by raising a debug exception or by return. */
4967 if (do_debug) {
4968 gen_exception(EXCP_DEBUG);
4969 } else {
4970 tcg_gen_exit_tb(0);
4972 break;
4973 default:
4974 abort();
4977 gen_tb_end(tb, num_insns);
4979 if (search_pc) {
4980 j = tcg_op_buf_count();
4981 lj++;
4982 while (lj <= j) {
4983 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4985 } else {
4986 tb->size = dc.pc - pc_start;
4987 tb->icount = num_insns;
4990 #if defined(S390X_DEBUG_DISAS)
4991 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4992 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4993 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4994 qemu_log("\n");
4996 #endif
4999 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
5001 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
5004 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
5006 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
5009 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
5011 int cc_op;
5012 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
5013 cc_op = gen_opc_cc_op[pc_pos];
5014 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5015 env->cc_op = cc_op;