target-s390: Convert SET SYSTEM MASK
[qemu/ar7.git] / target-s390x / translate.c
blob 246d0f0a9a2addf4735b330d8ffd094831825d02
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 int is_jmp;
61 /* Information carried about a condition to be evaluated. */
62 typedef struct {
63 TCGCond cond:8;
64 bool is_64;
65 bool g1;
66 bool g2;
67 union {
68 struct { TCGv_i64 a, b; } s64;
69 struct { TCGv_i32 a, b; } s32;
70 } u;
71 } DisasCompare;
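/* Note: g1/g2 flag operands that alias global TCG values (cc_op, cc_src,
   cc_dst); free_compare() skips freeing those so the globals survive. */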
73 #define DISAS_EXCP 4
75 static void gen_op_calc_cc(DisasContext *s);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
82 static inline void debug_insn(uint64_t insn)
84 LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
94 return pc;
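    /* In 31-bit mode the link information carries the addressing-mode bit,
       hence the 0x80000000 OR above; in 24-bit mode the pc is returned
       unchanged, and in 64-bit mode the full address serves as link info. */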
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
100 int i;
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
137 #endif
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10" PRIu64 "\t%10" PRIu64 "\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
144 #endif
146 cpu_fprintf(f, "\n");
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
163 void s390x_translate_init(void)
165 int i;
167 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
168 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
169 offsetof(CPUS390XState, psw.addr),
170 "psw_addr");
171 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
172 offsetof(CPUS390XState, psw.mask),
173 "psw_mask");
175 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
176 "cc_op");
177 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
178 "cc_src");
179 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
180 "cc_dst");
181 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
182 "cc_vr");
184 for (i = 0; i < 16; i++) {
185 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
186 regs[i] = tcg_global_mem_new(TCG_AREG0,
187 offsetof(CPUS390XState, regs[i]),
188 cpu_reg_names[i]);
191 for (i = 0; i < 16; i++) {
192 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
193 fregs[i] = tcg_global_mem_new(TCG_AREG0,
194 offsetof(CPUS390XState, fregs[i].d),
195 cpu_reg_names[i + 16]);
198 /* register helpers */
199 #define GEN_HELPER 2
200 #include "helper.h"
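/* Standard QEMU idiom: helper.h is included three times in all.  The plain
   include near the top declares the helper prototypes, the GEN_HELPER 1 pass
   defines the gen_helper_* call wrappers, and this GEN_HELPER 2 pass emits
   the table that registers the helpers with TCG. */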
203 static inline TCGv_i64 load_reg(int reg)
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
210 static inline TCGv_i64 load_freg(int reg)
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_mov_i64(r, fregs[reg]);
214 return r;
217 static inline TCGv_i32 load_freg32(int reg)
219 TCGv_i32 r = tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
222 #else
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
224 #endif
225 return r;
228 static inline TCGv_i64 load_freg32_i64(int reg)
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
235 static inline TCGv_i32 load_reg32(int reg)
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
239 return r;
242 static inline TCGv_i64 load_reg32_i64(int reg)
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
246 return r;
249 static inline void store_reg(int reg, TCGv_i64 v)
251 tcg_gen_mov_i64(regs[reg], v);
254 static inline void store_freg(int reg, TCGv_i64 v)
256 tcg_gen_mov_i64(fregs[reg], v);
259 static inline void store_reg32(int reg, TCGv_i32 v)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
264 #else
265 tcg_gen_deposit_i64(regs[reg], regs[reg],
266 MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
267 #endif
270 static inline void store_reg32_i64(int reg, TCGv_i64 v)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
276 static inline void store_reg16(int reg, TCGv_i32 v)
278 /* 16 bit register writes keep the upper bytes */
279 #if HOST_LONG_BITS == 32
280 tcg_gen_deposit_i32(TCGV_LOW(regs[reg]), TCGV_LOW(regs[reg]), v, 0, 16);
281 #else
282 tcg_gen_deposit_i64(regs[reg], regs[reg],
283 MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 16);
284 #endif
287 static inline void store_freg32(int reg, TCGv_i32 v)
289 /* 32 bit register writes keep the lower half */
290 #if HOST_LONG_BITS == 32
291 tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
292 #else
293 tcg_gen_deposit_i64(fregs[reg], fregs[reg],
294 MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
295 #endif
298 static inline void store_freg32_i64(int reg, TCGv_i64 v)
300 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
303 static inline void return_low128(TCGv_i64 dest)
305 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
308 static inline void update_psw_addr(DisasContext *s)
310 /* psw.addr */
311 tcg_gen_movi_i64(psw_addr, s->pc);
314 static inline void potential_page_fault(DisasContext *s)
316 #ifndef CONFIG_USER_ONLY
317 update_psw_addr(s);
318 gen_op_calc_cc(s);
319 #endif
322 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
324 return (uint64_t)cpu_lduw_code(env, pc);
327 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
329 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
332 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
334 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
337 static inline int get_mem_index(DisasContext *s)
339 switch (s->tb->flags & FLAG_MASK_ASC) {
340 case PSW_ASC_PRIMARY >> 32:
341 return 0;
342 case PSW_ASC_SECONDARY >> 32:
343 return 1;
344 case PSW_ASC_HOME >> 32:
345 return 2;
346 default:
347 tcg_abort();
348 break;
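    /* The MMU index returned here mirrors the PSW address-space control:
       0 = primary, 1 = secondary, 2 = home space. */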
352 static void gen_exception(int excp)
354 TCGv_i32 tmp = tcg_const_i32(excp);
355 gen_helper_exception(cpu_env, tmp);
356 tcg_temp_free_i32(tmp);
359 static void gen_program_exception(DisasContext *s, int code)
361 TCGv_i32 tmp;
363 /* Remember what pgm exception this was. */
364 tmp = tcg_const_i32(code);
365 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
366 tcg_temp_free_i32(tmp);
368 tmp = tcg_const_i32(s->next_pc - s->pc);
369 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
370 tcg_temp_free_i32(tmp);
372 /* Advance past instruction. */
373 s->pc = s->next_pc;
374 update_psw_addr(s);
376 /* Save off cc. */
377 gen_op_calc_cc(s);
379 /* Trigger exception. */
380 gen_exception(EXCP_PGM);
382 /* End TB here. */
383 s->is_jmp = DISAS_EXCP;
386 static inline void gen_illegal_opcode(DisasContext *s)
388 gen_program_exception(s, PGM_SPECIFICATION);
391 static inline void check_privileged(DisasContext *s)
393 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
394 gen_program_exception(s, PGM_PRIVILEGED);
398 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
400 TCGv_i64 tmp;
402 /* 31-bitify the immediate part; register contents are dealt with below */
403 if (!(s->tb->flags & FLAG_MASK_64)) {
404 d2 &= 0x7fffffffUL;
407 if (x2) {
408 if (d2) {
409 tmp = tcg_const_i64(d2);
410 tcg_gen_add_i64(tmp, tmp, regs[x2]);
411 } else {
412 tmp = load_reg(x2);
414 if (b2) {
415 tcg_gen_add_i64(tmp, tmp, regs[b2]);
417 } else if (b2) {
418 if (d2) {
419 tmp = tcg_const_i64(d2);
420 tcg_gen_add_i64(tmp, tmp, regs[b2]);
421 } else {
422 tmp = load_reg(b2);
424 } else {
425 tmp = tcg_const_i64(d2);
428 /* 31-bit mode mask if there are values loaded from registers */
429 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
430 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
433 return tmp;
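    /* Worked example: for an operand D2(X2,B2) with d2 = 0x123, x2 = 5 and
       b2 = 12, the code above computes 0x123 + r5 + r12, masked to 31 bits
       outside 64-bit mode.  An x2 or b2 of 0 means "no register", not r0. */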
436 static void gen_op_movi_cc(DisasContext *s, uint32_t val)
438 s->cc_op = CC_OP_CONST0 + val;
441 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
443 tcg_gen_discard_i64(cc_src);
444 tcg_gen_mov_i64(cc_dst, dst);
445 tcg_gen_discard_i64(cc_vr);
446 s->cc_op = op;
449 static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
451 tcg_gen_discard_i64(cc_src);
452 tcg_gen_extu_i32_i64(cc_dst, dst);
453 tcg_gen_discard_i64(cc_vr);
454 s->cc_op = op;
457 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
458 TCGv_i64 dst)
460 tcg_gen_mov_i64(cc_src, src);
461 tcg_gen_mov_i64(cc_dst, dst);
462 tcg_gen_discard_i64(cc_vr);
463 s->cc_op = op;
466 static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
467 TCGv_i32 dst)
469 tcg_gen_extu_i32_i64(cc_src, src);
470 tcg_gen_extu_i32_i64(cc_dst, dst);
471 tcg_gen_discard_i64(cc_vr);
472 s->cc_op = op;
475 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
476 TCGv_i64 dst, TCGv_i64 vr)
478 tcg_gen_mov_i64(cc_src, src);
479 tcg_gen_mov_i64(cc_dst, dst);
480 tcg_gen_mov_i64(cc_vr, vr);
481 s->cc_op = op;
484 static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
486 gen_op_update1_cc_i32(s, CC_OP_NZ, val);
489 static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
491 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
494 static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
495 enum cc_op cond)
497 gen_op_update2_cc_i32(s, cond, v1, v2);
500 static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
501 enum cc_op cond)
503 gen_op_update2_cc_i64(s, cond, v1, v2);
506 static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
508 cmp_32(s, v1, v2, CC_OP_LTGT_32);
511 static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
513 cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
516 static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
518 /* XXX optimize for the constant? put it in s? */
519 TCGv_i32 tmp = tcg_const_i32(v2);
520 cmp_32(s, v1, tmp, CC_OP_LTGT_32);
521 tcg_temp_free_i32(tmp);
524 static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
526 TCGv_i32 tmp = tcg_const_i32(v2);
527 cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
528 tcg_temp_free_i32(tmp);
531 static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
533 cmp_64(s, v1, v2, CC_OP_LTGT_64);
536 static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
538 cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
541 static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
543 TCGv_i64 tmp = tcg_const_i64(v2);
544 cmp_s64(s, v1, tmp);
545 tcg_temp_free_i64(tmp);
548 static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
550 TCGv_i64 tmp = tcg_const_i64(v2);
551 cmp_u64(s, v1, tmp);
552 tcg_temp_free_i64(tmp);
555 static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
557 gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
560 static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
562 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
565 static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
567 tcg_gen_extu_i32_i64(cc_src, v1);
568 tcg_gen_mov_i64(cc_dst, v2);
569 tcg_gen_discard_i64(cc_vr);
570 s->cc_op = CC_OP_LTGT_F32;
573 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
575 gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
578 /* CC value is in env->cc_op */
579 static inline void set_cc_static(DisasContext *s)
581 tcg_gen_discard_i64(cc_src);
582 tcg_gen_discard_i64(cc_dst);
583 tcg_gen_discard_i64(cc_vr);
584 s->cc_op = CC_OP_STATIC;
587 static inline void gen_op_set_cc_op(DisasContext *s)
589 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
590 tcg_gen_movi_i32(cc_op, s->cc_op);
594 static inline void gen_update_cc_op(DisasContext *s)
596 gen_op_set_cc_op(s);
599 /* calculates cc into cc_op */
600 static void gen_op_calc_cc(DisasContext *s)
602 TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
603 TCGv_i64 dummy = tcg_const_i64(0);
605 switch (s->cc_op) {
606 case CC_OP_CONST0:
607 case CC_OP_CONST1:
608 case CC_OP_CONST2:
609 case CC_OP_CONST3:
610 /* s->cc_op is the cc value */
611 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
612 break;
613 case CC_OP_STATIC:
614 /* env->cc_op already is the cc value */
615 break;
616 case CC_OP_NZ:
617 case CC_OP_ABS_64:
618 case CC_OP_NABS_64:
619 case CC_OP_ABS_32:
620 case CC_OP_NABS_32:
621 case CC_OP_LTGT0_32:
622 case CC_OP_LTGT0_64:
623 case CC_OP_COMP_32:
624 case CC_OP_COMP_64:
625 case CC_OP_NZ_F32:
626 case CC_OP_NZ_F64:
627 /* 1 argument */
628 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
629 break;
630 case CC_OP_ICM:
631 case CC_OP_LTGT_32:
632 case CC_OP_LTGT_64:
633 case CC_OP_LTUGTU_32:
634 case CC_OP_LTUGTU_64:
635 case CC_OP_TM_32:
636 case CC_OP_TM_64:
637 case CC_OP_LTGT_F32:
638 case CC_OP_LTGT_F64:
639 case CC_OP_SLAG:
640 /* 2 arguments */
641 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
642 break;
643 case CC_OP_ADD_64:
644 case CC_OP_ADDU_64:
645 case CC_OP_ADDC_64:
646 case CC_OP_SUB_64:
647 case CC_OP_SUBU_64:
648 case CC_OP_SUBB_64:
649 case CC_OP_ADD_32:
650 case CC_OP_ADDU_32:
651 case CC_OP_ADDC_32:
652 case CC_OP_SUB_32:
653 case CC_OP_SUBU_32:
654 case CC_OP_SUBB_32:
655 /* 3 arguments */
656 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
657 break;
658 case CC_OP_DYNAMIC:
659 /* unknown operation - assume 3 arguments and cc_op in env */
660 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
661 break;
662 default:
663 tcg_abort();
666 tcg_temp_free_i32(local_cc_op);
667 tcg_temp_free_i64(dummy);
669 /* We now have cc in cc_op as constant */
670 set_cc_static(s);
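    /* Summary of the lazy cc scheme: s->cc_op records how the condition code
       would be computed from cc_src/cc_dst/cc_vr.  gen_op_calc_cc() folds
       that into a concrete 0-3 value in the cc_op global and switches to
       CC_OP_STATIC, so later consumers can simply read cc_op. */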
673 static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
675 debug_insn(insn);
677 *r1 = (insn >> 4) & 0xf;
678 *r2 = insn & 0xf;
681 static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
682 int *x2, int *b2, int *d2)
684 debug_insn(insn);
686 *r1 = (insn >> 20) & 0xf;
687 *x2 = (insn >> 16) & 0xf;
688 *b2 = (insn >> 12) & 0xf;
689 *d2 = insn & 0xfff;
691 return get_address(s, *x2, *b2, *d2);
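    /* RX layout reminder: bits 31-24 opcode, 23-20 R1, 19-16 X2, 15-12 B2,
       11-0 D2.  For example, insn 0x58123456 (an L) decodes to r1 = 1,
       x2 = 2, b2 = 3, d2 = 0x456. */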
694 static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
695 int *b2, int *d2)
697 debug_insn(insn);
699 *r1 = (insn >> 20) & 0xf;
700 /* aka m3 */
701 *r3 = (insn >> 16) & 0xf;
702 *b2 = (insn >> 12) & 0xf;
703 *d2 = insn & 0xfff;
706 static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
707 int *b1, int *d1)
709 debug_insn(insn);
711 *i2 = (insn >> 16) & 0xff;
712 *b1 = (insn >> 12) & 0xf;
713 *d1 = insn & 0xfff;
715 return get_address(s, 0, *b1, *d1);
718 static int use_goto_tb(DisasContext *s, uint64_t dest)
720 /* NOTE: we handle the case where the TB spans two pages here */
721 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
722 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
723 && !s->singlestep_enabled
724 && !(s->tb->cflags & CF_LAST_IO));
727 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
729 gen_update_cc_op(s);
731 if (use_goto_tb(s, pc)) {
732 tcg_gen_goto_tb(tb_num);
733 tcg_gen_movi_i64(psw_addr, pc);
734 tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
735 } else {
736 /* jump to another page: currently not optimized */
737 tcg_gen_movi_i64(psw_addr, pc);
738 tcg_gen_exit_tb(0);
742 static inline void account_noninline_branch(DisasContext *s, int cc_op)
744 #ifdef DEBUG_INLINE_BRANCHES
745 inline_branch_miss[cc_op]++;
746 #endif
749 static inline void account_inline_branch(DisasContext *s, int cc_op)
751 #ifdef DEBUG_INLINE_BRANCHES
752 inline_branch_hit[cc_op]++;
753 #endif
756 /* Table of mask values to comparison codes, given a comparison as input.
757 For a true comparison CC=3 will never be set, but we treat this
758 conservatively for possible use when CC=3 indicates overflow. */
759 static const TCGCond ltgt_cond[16] = {
760 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
761 TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
762 TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
763 TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
764 TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
765 TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
766 TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
767 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
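/* Example: mask 8|4 selects "CC 0 or 1", i.e. equal or low, which for a
   signed comparison is the TCG_COND_LE entry in the EQ | LT row above. */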
770 /* Table of mask values to comparison codes, given a logic op as input.
771 For such, only CC=0 and CC=1 should be possible. */
772 static const TCGCond nz_cond[16] = {
773 /* | | x | x */
774 TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
775 /* | NE | x | x */
776 TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
777 /* EQ | | x | x */
778 TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
779 /* EQ | NE | x | x */
780 TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
783 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
784 details required to generate a TCG comparison. */
785 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
787 TCGCond cond;
788 enum cc_op old_cc_op = s->cc_op;
790 if (mask == 15 || mask == 0) {
791 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
792 c->u.s32.a = cc_op;
793 c->u.s32.b = cc_op;
794 c->g1 = c->g2 = true;
795 c->is_64 = false;
796 return;
799 /* Find the TCG condition for the mask + cc op. */
800 switch (old_cc_op) {
801 case CC_OP_LTGT0_32:
802 case CC_OP_LTGT0_64:
803 case CC_OP_LTGT_32:
804 case CC_OP_LTGT_64:
805 cond = ltgt_cond[mask];
806 if (cond == TCG_COND_NEVER) {
807 goto do_dynamic;
809 account_inline_branch(s, old_cc_op);
810 break;
812 case CC_OP_LTUGTU_32:
813 case CC_OP_LTUGTU_64:
814 cond = tcg_unsigned_cond(ltgt_cond[mask]);
815 if (cond == TCG_COND_NEVER) {
816 goto do_dynamic;
818 account_inline_branch(s, old_cc_op);
819 break;
821 case CC_OP_NZ:
822 cond = nz_cond[mask];
823 if (cond == TCG_COND_NEVER) {
824 goto do_dynamic;
826 account_inline_branch(s, old_cc_op);
827 break;
829 case CC_OP_TM_32:
830 case CC_OP_TM_64:
831 switch (mask) {
832 case 8:
833 cond = TCG_COND_EQ;
834 break;
835 case 4 | 2 | 1:
836 cond = TCG_COND_NE;
837 break;
838 default:
839 goto do_dynamic;
841 account_inline_branch(s, old_cc_op);
842 break;
844 case CC_OP_ICM:
845 switch (mask) {
846 case 8:
847 cond = TCG_COND_EQ;
848 break;
849 case 4 | 2 | 1:
850 case 4 | 2:
851 cond = TCG_COND_NE;
852 break;
853 default:
854 goto do_dynamic;
856 account_inline_branch(s, old_cc_op);
857 break;
859 default:
860 do_dynamic:
861 /* Calculate cc value. */
862 gen_op_calc_cc(s);
863 /* FALLTHRU */
865 case CC_OP_STATIC:
866 /* Jump based on CC. We'll load up the real cond below;
867 the assignment here merely avoids a compiler warning. */
868 account_noninline_branch(s, old_cc_op);
869 old_cc_op = CC_OP_STATIC;
870 cond = TCG_COND_NEVER;
871 break;
874 /* Load up the arguments of the comparison. */
875 c->is_64 = true;
876 c->g1 = c->g2 = false;
877 switch (old_cc_op) {
878 case CC_OP_LTGT0_32:
879 c->is_64 = false;
880 c->u.s32.a = tcg_temp_new_i32();
881 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
882 c->u.s32.b = tcg_const_i32(0);
883 break;
884 case CC_OP_LTGT_32:
885 case CC_OP_LTUGTU_32:
886 c->is_64 = false;
887 c->u.s32.a = tcg_temp_new_i32();
888 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
889 c->u.s32.b = tcg_temp_new_i32();
890 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
891 break;
893 case CC_OP_LTGT0_64:
894 case CC_OP_NZ:
895 c->u.s64.a = cc_dst;
896 c->u.s64.b = tcg_const_i64(0);
897 c->g1 = true;
898 break;
899 case CC_OP_LTGT_64:
900 case CC_OP_LTUGTU_64:
901 c->u.s64.a = cc_src;
902 c->u.s64.b = cc_dst;
903 c->g1 = c->g2 = true;
904 break;
906 case CC_OP_TM_32:
907 case CC_OP_TM_64:
908 case CC_OP_ICM:
909 c->u.s64.a = tcg_temp_new_i64();
910 c->u.s64.b = tcg_const_i64(0);
911 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
912 break;
914 case CC_OP_STATIC:
915 c->is_64 = false;
916 c->u.s32.a = cc_op;
917 c->g1 = true;
918 switch (mask) {
919 case 0x8 | 0x4 | 0x2: /* cc != 3 */
920 cond = TCG_COND_NE;
921 c->u.s32.b = tcg_const_i32(3);
922 break;
923 case 0x8 | 0x4 | 0x1: /* cc != 2 */
924 cond = TCG_COND_NE;
925 c->u.s32.b = tcg_const_i32(2);
926 break;
927 case 0x8 | 0x2 | 0x1: /* cc != 1 */
928 cond = TCG_COND_NE;
929 c->u.s32.b = tcg_const_i32(1);
930 break;
931 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
932 cond = TCG_COND_EQ;
933 c->g1 = false;
934 c->u.s32.a = tcg_temp_new_i32();
935 c->u.s32.b = tcg_const_i32(0);
936 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
937 break;
938 case 0x8 | 0x4: /* cc < 2 */
939 cond = TCG_COND_LTU;
940 c->u.s32.b = tcg_const_i32(2);
941 break;
942 case 0x8: /* cc == 0 */
943 cond = TCG_COND_EQ;
944 c->u.s32.b = tcg_const_i32(0);
945 break;
946 case 0x4 | 0x2 | 0x1: /* cc != 0 */
947 cond = TCG_COND_NE;
948 c->u.s32.b = tcg_const_i32(0);
949 break;
950 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
951 cond = TCG_COND_NE;
952 c->g1 = false;
953 c->u.s32.a = tcg_temp_new_i32();
954 c->u.s32.b = tcg_const_i32(0);
955 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
956 break;
957 case 0x4: /* cc == 1 */
958 cond = TCG_COND_EQ;
959 c->u.s32.b = tcg_const_i32(1);
960 break;
961 case 0x2 | 0x1: /* cc > 1 */
962 cond = TCG_COND_GTU;
963 c->u.s32.b = tcg_const_i32(1);
964 break;
965 case 0x2: /* cc == 2 */
966 cond = TCG_COND_EQ;
967 c->u.s32.b = tcg_const_i32(2);
968 break;
969 case 0x1: /* cc == 3 */
970 cond = TCG_COND_EQ;
971 c->u.s32.b = tcg_const_i32(3);
972 break;
973 default:
974 /* CC is masked by something else: (8 >> cc) & mask. */
975 cond = TCG_COND_NE;
976 c->g1 = false;
977 c->u.s32.a = tcg_const_i32(8);
978 c->u.s32.b = tcg_const_i32(0);
979 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
980 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
981 break;
983 break;
985 default:
986 abort();
988 c->cond = cond;
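    /* Example: a BRC with mask 0xa (branch on CC 0 or 2) after a signed
       compare (CC_OP_LTGT_*) resolves via ltgt_cond[0xa] to TCG_COND_GE,
       so the branch can be emitted inline without materializing the cc. */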
991 static void free_compare(DisasCompare *c)
993 if (!c->g1) {
994 if (c->is_64) {
995 tcg_temp_free_i64(c->u.s64.a);
996 } else {
997 tcg_temp_free_i32(c->u.s32.a);
1000 if (!c->g2) {
1001 if (c->is_64) {
1002 tcg_temp_free_i64(c->u.s64.b);
1003 } else {
1004 tcg_temp_free_i32(c->u.s32.b);
1009 static void gen_op_mvc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
1011 TCGv_i64 tmp, tmp2;
1012 int i;
1013 int l_memset = gen_new_label();
1014 int l_out = gen_new_label();
1015 TCGv_i64 dest = tcg_temp_local_new_i64();
1016 TCGv_i64 src = tcg_temp_local_new_i64();
1017 TCGv_i32 vl;
1019 /* Find out if we should use the inline version of mvc */
1020 switch (l) {
1021 case 0:
1022 case 1:
1023 case 2:
1024 case 3:
1025 case 4:
1026 case 5:
1027 case 6:
1028 case 7:
1029 case 11:
1030 case 15:
1031 /* use inline */
1032 break;
1033 default:
1034 /* Fall back to helper */
1035 vl = tcg_const_i32(l);
1036 potential_page_fault(s);
1037 gen_helper_mvc(cpu_env, vl, s1, s2);
1038 tcg_temp_free_i32(vl);
1039 return;
1042 tcg_gen_mov_i64(dest, s1);
1043 tcg_gen_mov_i64(src, s2);
1045 if (!(s->tb->flags & FLAG_MASK_64)) {
1046 /* XXX what if we overflow while moving? */
1047 tcg_gen_andi_i64(dest, dest, 0x7fffffffUL);
1048 tcg_gen_andi_i64(src, src, 0x7fffffffUL);
1051 tmp = tcg_temp_new_i64();
1052 tcg_gen_addi_i64(tmp, src, 1);
1053 tcg_gen_brcond_i64(TCG_COND_EQ, dest, tmp, l_memset);
1054 tcg_temp_free_i64(tmp);
1056 switch (l) {
1057 case 0:
1058 tmp = tcg_temp_new_i64();
1060 tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
1061 tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
1063 tcg_temp_free_i64(tmp);
1064 break;
1065 case 1:
1066 tmp = tcg_temp_new_i64();
1068 tcg_gen_qemu_ld16u(tmp, src, get_mem_index(s));
1069 tcg_gen_qemu_st16(tmp, dest, get_mem_index(s));
1071 tcg_temp_free_i64(tmp);
1072 break;
1073 case 3:
1074 tmp = tcg_temp_new_i64();
1076 tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
1077 tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));
1079 tcg_temp_free_i64(tmp);
1080 break;
1081 case 4:
1082 tmp = tcg_temp_new_i64();
1083 tmp2 = tcg_temp_new_i64();
1085 tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
1086 tcg_gen_addi_i64(src, src, 4);
1087 tcg_gen_qemu_ld8u(tmp2, src, get_mem_index(s));
1088 tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));
1089 tcg_gen_addi_i64(dest, dest, 4);
1090 tcg_gen_qemu_st8(tmp2, dest, get_mem_index(s));
1092 tcg_temp_free_i64(tmp);
1093 tcg_temp_free_i64(tmp2);
1094 break;
1095 case 7:
1096 tmp = tcg_temp_new_i64();
1098 tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
1099 tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
1101 tcg_temp_free_i64(tmp);
1102 break;
1103 default:
1104 /* The inline version can become too big for awkward lengths, so only
1105 use it for known-good lengths */
1106 tmp = tcg_temp_new_i64();
1107 tmp2 = tcg_const_i64(8);
1108 for (i = 0; (i + 7) <= l; i += 8) {
1109 tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
1110 tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
1112 tcg_gen_add_i64(src, src, tmp2);
1113 tcg_gen_add_i64(dest, dest, tmp2);
1116 tcg_temp_free_i64(tmp2);
1117 tmp2 = tcg_const_i64(1);
1119 for (; i <= l; i++) {
1120 tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
1121 tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
1123 tcg_gen_add_i64(src, src, tmp2);
1124 tcg_gen_add_i64(dest, dest, tmp2);
1127 tcg_temp_free_i64(tmp2);
1128 tcg_temp_free_i64(tmp);
1129 break;
1132 tcg_gen_br(l_out);
1134 gen_set_label(l_memset);
1135 /* memset case (dest == (src + 1)) */
1137 tmp = tcg_temp_new_i64();
1138 tmp2 = tcg_temp_new_i64();
1139 /* fill tmp with the byte */
1140 tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
1141 tcg_gen_shli_i64(tmp2, tmp, 8);
1142 tcg_gen_or_i64(tmp, tmp, tmp2);
1143 tcg_gen_shli_i64(tmp2, tmp, 16);
1144 tcg_gen_or_i64(tmp, tmp, tmp2);
1145 tcg_gen_shli_i64(tmp2, tmp, 32);
1146 tcg_gen_or_i64(tmp, tmp, tmp2);
1147 tcg_temp_free_i64(tmp2);
1149 tmp2 = tcg_const_i64(8);
1151 for (i = 0; (i + 7) <= l; i += 8) {
1152 tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
1153 tcg_gen_addi_i64(dest, dest, 8);
1156 tcg_temp_free_i64(tmp2);
1157 tmp2 = tcg_const_i64(1);
1159 for (; i <= l; i++) {
1160 tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
1161 tcg_gen_addi_i64(dest, dest, 1);
1164 tcg_temp_free_i64(tmp2);
1165 tcg_temp_free_i64(tmp);
1167 gen_set_label(l_out);
1169 tcg_temp_free(dest);
1170 tcg_temp_free(src);
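    /* The l_memset path above exploits the architectural definition of MVC:
       bytes move one at a time, left to right, so dest == src + 1 makes each
       store feed the next load and propagates the first byte through the
       whole destination, the classic MVC memset idiom. */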
1173 static void gen_op_clc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
1175 TCGv_i64 tmp;
1176 TCGv_i64 tmp2;
1177 TCGv_i32 vl;
1179 /* check for simple 32bit or 64bit match */
1180 switch (l) {
1181 case 0:
1182 tmp = tcg_temp_new_i64();
1183 tmp2 = tcg_temp_new_i64();
1185 tcg_gen_qemu_ld8u(tmp, s1, get_mem_index(s));
1186 tcg_gen_qemu_ld8u(tmp2, s2, get_mem_index(s));
1187 cmp_u64(s, tmp, tmp2);
1189 tcg_temp_free_i64(tmp);
1190 tcg_temp_free_i64(tmp2);
1191 return;
1192 case 1:
1193 tmp = tcg_temp_new_i64();
1194 tmp2 = tcg_temp_new_i64();
1196 tcg_gen_qemu_ld16u(tmp, s1, get_mem_index(s));
1197 tcg_gen_qemu_ld16u(tmp2, s2, get_mem_index(s));
1198 cmp_u64(s, tmp, tmp2);
1200 tcg_temp_free_i64(tmp);
1201 tcg_temp_free_i64(tmp2);
1202 return;
1203 case 3:
1204 tmp = tcg_temp_new_i64();
1205 tmp2 = tcg_temp_new_i64();
1207 tcg_gen_qemu_ld32u(tmp, s1, get_mem_index(s));
1208 tcg_gen_qemu_ld32u(tmp2, s2, get_mem_index(s));
1209 cmp_u64(s, tmp, tmp2);
1211 tcg_temp_free_i64(tmp);
1212 tcg_temp_free_i64(tmp2);
1213 return;
1214 case 7:
1215 tmp = tcg_temp_new_i64();
1216 tmp2 = tcg_temp_new_i64();
1218 tcg_gen_qemu_ld64(tmp, s1, get_mem_index(s));
1219 tcg_gen_qemu_ld64(tmp2, s2, get_mem_index(s));
1220 cmp_u64(s, tmp, tmp2);
1222 tcg_temp_free_i64(tmp);
1223 tcg_temp_free_i64(tmp2);
1224 return;
1227 potential_page_fault(s);
1228 vl = tcg_const_i32(l);
1229 gen_helper_clc(cc_op, cpu_env, vl, s1, s2);
1230 tcg_temp_free_i32(vl);
1231 set_cc_static(s);
1234 static void disas_e3(CPUS390XState *env, DisasContext* s, int op, int r1,
1235 int x2, int b2, int d2)
1237 TCGv_i64 addr, tmp2;
1238 TCGv_i32 tmp32_1;
1240 LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
1241 op, r1, x2, b2, d2);
1242 addr = get_address(s, x2, b2, d2);
1243 switch (op) {
1244 case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
1245 tmp2 = tcg_temp_new_i64();
1246 tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
1247 tcg_gen_bswap64_i64(tmp2, tmp2);
1248 store_reg(r1, tmp2);
1249 tcg_temp_free_i64(tmp2);
1250 break;
1251 case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
1252 tmp2 = tcg_temp_new_i64();
1253 tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
1254 tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
1255 store_reg(r1, tmp2);
1256 tcg_temp_free_i64(tmp2);
1257 break;
1258 case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
1259 tmp2 = tcg_temp_new_i64();
1260 tmp32_1 = tcg_temp_new_i32();
1261 tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
1262 tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
1263 tcg_temp_free_i64(tmp2);
1264 tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
1265 store_reg32(r1, tmp32_1);
1266 tcg_temp_free_i32(tmp32_1);
1267 break;
1268 case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
1269 tmp2 = tcg_temp_new_i64();
1270 tmp32_1 = tcg_temp_new_i32();
1271 tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
1272 tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
1273 tcg_temp_free_i64(tmp2);
1274 tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
1275 store_reg16(r1, tmp32_1);
1276 tcg_temp_free_i32(tmp32_1);
1277 break;
1278 case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
1279 tmp32_1 = load_reg32(r1);
1280 tmp2 = tcg_temp_new_i64();
1281 tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
1282 tcg_gen_extu_i32_i64(tmp2, tmp32_1);
1283 tcg_temp_free_i32(tmp32_1);
1284 tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
1285 tcg_temp_free_i64(tmp2);
1286 break;
1287 default:
1288 LOG_DISAS("illegal e3 operation 0x%x\n", op);
1289 gen_illegal_opcode(s);
1290 break;
1292 tcg_temp_free_i64(addr);
1295 #ifndef CONFIG_USER_ONLY
1296 static void disas_e5(CPUS390XState *env, DisasContext* s, uint64_t insn)
1298 TCGv_i64 tmp, tmp2;
1299 int op = (insn >> 32) & 0xff;
1301 tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
1302 tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);
1304 LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
1305 switch (op) {
1306 case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
1307 /* Test Protection */
1308 potential_page_fault(s);
1309 gen_helper_tprot(cc_op, tmp, tmp2);
1310 set_cc_static(s);
1311 break;
1312 default:
1313 LOG_DISAS("illegal e5 operation 0x%x\n", op);
1314 gen_illegal_opcode(s);
1315 break;
1318 tcg_temp_free_i64(tmp);
1319 tcg_temp_free_i64(tmp2);
1321 #endif
1323 static void disas_eb(CPUS390XState *env, DisasContext *s, int op, int r1,
1324 int r3, int b2, int d2)
1326 TCGv_i64 tmp, tmp2, tmp3, tmp4;
1327 TCGv_i32 tmp32_1, tmp32_2;
1328 int i, stm_len;
1330 LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
1331 op, r1, r3, b2, d2);
1332 switch (op) {
1333 case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */
1334 case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */
1335 case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */
1336 case 0xb: /* SLAG R1,R3,D2(B2) [RSY] */
1337 case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */
1338 if (b2) {
1339 tmp = get_address(s, 0, b2, d2);
1340 tcg_gen_andi_i64(tmp, tmp, 0x3f);
1341 } else {
1342 tmp = tcg_const_i64(d2 & 0x3f);
1344 switch (op) {
1345 case 0xc:
1346 tcg_gen_shr_i64(regs[r1], regs[r3], tmp);
1347 break;
1348 case 0xd:
1349 tcg_gen_shl_i64(regs[r1], regs[r3], tmp);
1350 break;
1351 case 0xa:
1352 tcg_gen_sar_i64(regs[r1], regs[r3], tmp);
1353 break;
1354 case 0xb:
1355 tmp2 = tcg_temp_new_i64();
1356 tmp3 = tcg_temp_new_i64();
1357 gen_op_update2_cc_i64(s, CC_OP_SLAG, regs[r3], tmp);
1358 tcg_gen_shl_i64(tmp2, regs[r3], tmp);
1359 /* override sign bit with source sign */
1360 tcg_gen_andi_i64(tmp2, tmp2, ~0x8000000000000000ULL);
1361 tcg_gen_andi_i64(tmp3, regs[r3], 0x8000000000000000ULL);
1362 tcg_gen_or_i64(regs[r1], tmp2, tmp3);
1363 tcg_temp_free_i64(tmp2);
1364 tcg_temp_free_i64(tmp3);
1365 break;
1366 case 0x1c:
1367 tcg_gen_rotl_i64(regs[r1], regs[r3], tmp);
1368 break;
1369 default:
1370 tcg_abort();
1371 break;
1373 if (op == 0xa) {
1374 set_cc_s64(s, regs[r1]);
1376 tcg_temp_free_i64(tmp);
1377 break;
1378 case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */
1379 if (b2) {
1380 tmp = get_address(s, 0, b2, d2);
1381 tcg_gen_andi_i64(tmp, tmp, 0x3f);
1382 } else {
1383 tmp = tcg_const_i64(d2 & 0x3f);
1385 tmp32_1 = tcg_temp_new_i32();
1386 tmp32_2 = load_reg32(r3);
1387 tcg_gen_trunc_i64_i32(tmp32_1, tmp);
1388 switch (op) {
1389 case 0x1d:
1390 tcg_gen_rotl_i32(tmp32_1, tmp32_2, tmp32_1);
1391 break;
1392 default:
1393 tcg_abort();
1394 break;
1396 store_reg32(r1, tmp32_1);
1397 tcg_temp_free_i64(tmp);
1398 tcg_temp_free_i32(tmp32_1);
1399 tcg_temp_free_i32(tmp32_2);
1400 break;
1401 case 0x4: /* LMG R1,R3,D2(B2) [RSE] */
1402 case 0x24: /* STMG R1,R3,D2(B2) [RSE] */
1403 stm_len = 8;
1404 goto do_mh;
1405 case 0x26: /* STMH R1,R3,D2(B2) [RSE] */
1406 case 0x96: /* LMH R1,R3,D2(B2) [RSE] */
1407 stm_len = 4;
1408 do_mh:
1409 /* Apparently, unrolling lmg/stmg of any size gains performance -
1410 even for very long ones... */
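        /* Register numbers wrap modulo 16, so e.g. LMG %r14,%r2,... touches
           r14, r15, r0, r1 and r2; the loop below steps i = (i + 1) % 16
           until r3 has been processed. */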
1411 tmp = get_address(s, 0, b2, d2);
1412 tmp3 = tcg_const_i64(stm_len);
1413 tmp4 = tcg_const_i64(op == 0x26 ? 32 : 4);
1414 for (i = r1;; i = (i + 1) % 16) {
1415 switch (op) {
1416 case 0x4:
1417 tcg_gen_qemu_ld64(regs[i], tmp, get_mem_index(s));
1418 break;
1419 case 0x96:
1420 tmp2 = tcg_temp_new_i64();
1421 #if HOST_LONG_BITS == 32
1422 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
1423 tcg_gen_trunc_i64_i32(TCGV_HIGH(regs[i]), tmp2);
1424 #else
1425 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
1426 tcg_gen_shl_i64(tmp2, tmp2, tmp4);
1427 tcg_gen_ext32u_i64(regs[i], regs[i]);
1428 tcg_gen_or_i64(regs[i], regs[i], tmp2);
1429 #endif
1430 tcg_temp_free_i64(tmp2);
1431 break;
1432 case 0x24:
1433 tcg_gen_qemu_st64(regs[i], tmp, get_mem_index(s));
1434 break;
1435 case 0x26:
1436 tmp2 = tcg_temp_new_i64();
1437 tcg_gen_shr_i64(tmp2, regs[i], tmp4);
1438 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1439 tcg_temp_free_i64(tmp2);
1440 break;
1441 default:
1442 tcg_abort();
1444 if (i == r3) {
1445 break;
1447 tcg_gen_add_i64(tmp, tmp, tmp3);
1449 tcg_temp_free_i64(tmp);
1450 tcg_temp_free_i64(tmp3);
1451 tcg_temp_free_i64(tmp4);
1452 break;
1453 case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
1454 tmp = get_address(s, 0, b2, d2);
1455 tmp32_1 = tcg_const_i32(r1);
1456 tmp32_2 = tcg_const_i32(r3);
1457 potential_page_fault(s);
1458 gen_helper_stcmh(cpu_env, tmp32_1, tmp, tmp32_2);
1459 tcg_temp_free_i64(tmp);
1460 tcg_temp_free_i32(tmp32_1);
1461 tcg_temp_free_i32(tmp32_2);
1462 break;
1463 #ifndef CONFIG_USER_ONLY
1464 case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
1465 /* Load Control */
1466 check_privileged(s);
1467 tmp = get_address(s, 0, b2, d2);
1468 tmp32_1 = tcg_const_i32(r1);
1469 tmp32_2 = tcg_const_i32(r3);
1470 potential_page_fault(s);
1471 gen_helper_lctlg(cpu_env, tmp32_1, tmp, tmp32_2);
1472 tcg_temp_free_i64(tmp);
1473 tcg_temp_free_i32(tmp32_1);
1474 tcg_temp_free_i32(tmp32_2);
1475 break;
1476 case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
1477 /* Store Control */
1478 check_privileged(s);
1479 tmp = get_address(s, 0, b2, d2);
1480 tmp32_1 = tcg_const_i32(r1);
1481 tmp32_2 = tcg_const_i32(r3);
1482 potential_page_fault(s);
1483 gen_helper_stctg(cpu_env, tmp32_1, tmp, tmp32_2);
1484 tcg_temp_free_i64(tmp);
1485 tcg_temp_free_i32(tmp32_1);
1486 tcg_temp_free_i32(tmp32_2);
1487 break;
1488 #endif
1489 case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
1490 tmp = get_address(s, 0, b2, d2);
1491 tmp32_1 = tcg_const_i32(r1);
1492 tmp32_2 = tcg_const_i32(r3);
1493 potential_page_fault(s);
1494 /* XXX rewrite in tcg */
1495 gen_helper_csg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
1496 set_cc_static(s);
1497 tcg_temp_free_i64(tmp);
1498 tcg_temp_free_i32(tmp32_1);
1499 tcg_temp_free_i32(tmp32_2);
1500 break;
1501 case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
1502 tmp = get_address(s, 0, b2, d2);
1503 tmp32_1 = tcg_const_i32(r1);
1504 tmp32_2 = tcg_const_i32(r3);
1505 potential_page_fault(s);
1506 /* XXX rewrite in tcg */
1507 gen_helper_cdsg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
1508 set_cc_static(s);
1509 tcg_temp_free_i64(tmp);
1510 tcg_temp_free_i32(tmp32_1);
1511 tcg_temp_free_i32(tmp32_2);
1512 break;
1513 case 0x52: /* MVIY D1(B1),I2 [SIY] */
1514 tmp = get_address(s, 0, b2, d2); /* SIY -> this is the destination */
1515 tmp2 = tcg_const_i64((r1 << 4) | r3);
1516 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
1517 tcg_temp_free_i64(tmp);
1518 tcg_temp_free_i64(tmp2);
1519 break;
1520 default:
1521 LOG_DISAS("illegal eb operation 0x%x\n", op);
1522 gen_illegal_opcode(s);
1523 break;
1527 static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
1528 int x2, int b2, int d2, int r1b)
1530 TCGv_i32 tmp_r1, tmp32;
1531 TCGv_i64 addr, tmp;
1532 addr = get_address(s, x2, b2, d2);
1533 tmp_r1 = tcg_const_i32(r1);
1534 switch (op) {
1535 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1536 potential_page_fault(s);
1537 gen_helper_ldeb(cpu_env, tmp_r1, addr);
1538 break;
1539 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1540 potential_page_fault(s);
1541 gen_helper_lxdb(cpu_env, tmp_r1, addr);
1542 break;
1543 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1544 tmp = tcg_temp_new_i64();
1545 tmp32 = load_freg32(r1);
1546 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1547 set_cc_cmp_f32_i64(s, tmp32, tmp);
1548 tcg_temp_free_i64(tmp);
1549 tcg_temp_free_i32(tmp32);
1550 break;
1551 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1552 tmp = tcg_temp_new_i64();
1553 tmp32 = tcg_temp_new_i32();
1554 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1555 tcg_gen_trunc_i64_i32(tmp32, tmp);
1556 gen_helper_aeb(cpu_env, tmp_r1, tmp32);
1557 tcg_temp_free_i64(tmp);
1558 tcg_temp_free_i32(tmp32);
1560 tmp32 = load_freg32(r1);
1561 gen_set_cc_nz_f32(s, tmp32);
1562 tcg_temp_free_i32(tmp32);
1563 break;
1564 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1565 tmp = tcg_temp_new_i64();
1566 tmp32 = tcg_temp_new_i32();
1567 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1568 tcg_gen_trunc_i64_i32(tmp32, tmp);
1569 gen_helper_seb(cpu_env, tmp_r1, tmp32);
1570 tcg_temp_free_i64(tmp);
1571 tcg_temp_free_i32(tmp32);
1573 tmp32 = load_freg32(r1);
1574 gen_set_cc_nz_f32(s, tmp32);
1575 tcg_temp_free_i32(tmp32);
1576 break;
1577 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1578 tmp = tcg_temp_new_i64();
1579 tmp32 = tcg_temp_new_i32();
1580 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1581 tcg_gen_trunc_i64_i32(tmp32, tmp);
1582 gen_helper_deb(cpu_env, tmp_r1, tmp32);
1583 tcg_temp_free_i64(tmp);
1584 tcg_temp_free_i32(tmp32);
1585 break;
1586 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1587 potential_page_fault(s);
1588 gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
1589 set_cc_static(s);
1590 break;
1591 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1592 potential_page_fault(s);
1593 gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
1594 set_cc_static(s);
1595 break;
1596 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1597 potential_page_fault(s);
1598 gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
1599 set_cc_static(s);
1600 break;
1601 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1602 tmp = tcg_temp_new_i64();
1603 tmp32 = tcg_temp_new_i32();
1604 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1605 tcg_gen_trunc_i64_i32(tmp32, tmp);
1606 gen_helper_meeb(cpu_env, tmp_r1, tmp32);
1607 tcg_temp_free_i64(tmp);
1608 tcg_temp_free_i32(tmp32);
1609 break;
1610 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1611 potential_page_fault(s);
1612 gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
1613 set_cc_static(s);
1614 break;
1615 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1616 potential_page_fault(s);
1617 gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
1618 set_cc_static(s);
1619 break;
1620 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1621 potential_page_fault(s);
1622 gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
1623 set_cc_static(s);
1624 break;
1625 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1626 potential_page_fault(s);
1627 gen_helper_mdb(cpu_env, tmp_r1, addr);
1628 break;
1629 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1630 potential_page_fault(s);
1631 gen_helper_ddb(cpu_env, tmp_r1, addr);
1632 break;
1633 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1634 /* for RXF insns, r1 is R3 and r1b is R1 */
1635 tmp32 = tcg_const_i32(r1b);
1636 potential_page_fault(s);
1637 gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
1638 tcg_temp_free_i32(tmp32);
1639 break;
1640 default:
1641 LOG_DISAS("illegal ed operation 0x%x\n", op);
1642 gen_illegal_opcode(s);
1643 return;
1645 tcg_temp_free_i32(tmp_r1);
1646 tcg_temp_free_i64(addr);
1649 static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
1650 uint32_t insn)
1652 TCGv_i64 tmp, tmp2, tmp3;
1653 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
1654 int r1, r2;
1655 #ifndef CONFIG_USER_ONLY
1656 int r3, d2, b2;
1657 #endif
1659 r1 = (insn >> 4) & 0xf;
1660 r2 = insn & 0xf;
1662 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1664 switch (op) {
1665 case 0x22: /* IPM R1 [RRE] */
1666 tmp32_1 = tcg_const_i32(r1);
1667 gen_op_calc_cc(s);
1668 gen_helper_ipm(cpu_env, cc_op, tmp32_1);
1669 tcg_temp_free_i32(tmp32_1);
1670 break;
1671 case 0x41: /* CKSM R1,R2 [RRE] */
1672 tmp32_1 = tcg_const_i32(r1);
1673 tmp32_2 = tcg_const_i32(r2);
1674 potential_page_fault(s);
1675 gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
1676 tcg_temp_free_i32(tmp32_1);
1677 tcg_temp_free_i32(tmp32_2);
1678 gen_op_movi_cc(s, 0);
1679 break;
1680 case 0x4e: /* SAR R1,R2 [RRE] */
1681 tmp32_1 = load_reg32(r2);
1682 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
1683 tcg_temp_free_i32(tmp32_1);
1684 break;
1685 case 0x4f: /* EAR R1,R2 [RRE] */
1686 tmp32_1 = tcg_temp_new_i32();
1687 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
1688 store_reg32(r1, tmp32_1);
1689 tcg_temp_free_i32(tmp32_1);
1690 break;
1691 case 0x54: /* MVPG R1,R2 [RRE] */
1692 tmp = load_reg(0);
1693 tmp2 = load_reg(r1);
1694 tmp3 = load_reg(r2);
1695 potential_page_fault(s);
1696 gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
1697 tcg_temp_free_i64(tmp);
1698 tcg_temp_free_i64(tmp2);
1699 tcg_temp_free_i64(tmp3);
1700 /* XXX check CCO bit and set CC accordingly */
1701 gen_op_movi_cc(s, 0);
1702 break;
1703 case 0x55: /* MVST R1,R2 [RRE] */
1704 tmp32_1 = load_reg32(0);
1705 tmp32_2 = tcg_const_i32(r1);
1706 tmp32_3 = tcg_const_i32(r2);
1707 potential_page_fault(s);
1708 gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
1709 tcg_temp_free_i32(tmp32_1);
1710 tcg_temp_free_i32(tmp32_2);
1711 tcg_temp_free_i32(tmp32_3);
1712 gen_op_movi_cc(s, 1);
1713 break;
1714 case 0x5d: /* CLST R1,R2 [RRE] */
1715 tmp32_1 = load_reg32(0);
1716 tmp32_2 = tcg_const_i32(r1);
1717 tmp32_3 = tcg_const_i32(r2);
1718 potential_page_fault(s);
1719 gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1720 set_cc_static(s);
1721 tcg_temp_free_i32(tmp32_1);
1722 tcg_temp_free_i32(tmp32_2);
1723 tcg_temp_free_i32(tmp32_3);
1724 break;
1725 case 0x5e: /* SRST R1,R2 [RRE] */
1726 tmp32_1 = load_reg32(0);
1727 tmp32_2 = tcg_const_i32(r1);
1728 tmp32_3 = tcg_const_i32(r2);
1729 potential_page_fault(s);
1730 gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1731 set_cc_static(s);
1732 tcg_temp_free_i32(tmp32_1);
1733 tcg_temp_free_i32(tmp32_2);
1734 tcg_temp_free_i32(tmp32_3);
1735 break;
1737 #ifndef CONFIG_USER_ONLY
1738 case 0x02: /* STIDP D2(B2) [S] */
1739 /* Store CPU ID */
1740 check_privileged(s);
1741 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1742 tmp = get_address(s, 0, b2, d2);
1743 potential_page_fault(s);
1744 gen_helper_stidp(cpu_env, tmp);
1745 tcg_temp_free_i64(tmp);
1746 break;
1747 case 0x04: /* SCK D2(B2) [S] */
1748 /* Set Clock */
1749 check_privileged(s);
1750 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1751 tmp = get_address(s, 0, b2, d2);
1752 potential_page_fault(s);
1753 gen_helper_sck(cc_op, tmp);
1754 set_cc_static(s);
1755 tcg_temp_free_i64(tmp);
1756 break;
1757 case 0x05: /* STCK D2(B2) [S] */
1758 /* Store Clock */
1759 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1760 tmp = get_address(s, 0, b2, d2);
1761 potential_page_fault(s);
1762 gen_helper_stck(cc_op, cpu_env, tmp);
1763 set_cc_static(s);
1764 tcg_temp_free_i64(tmp);
1765 break;
1766 case 0x06: /* SCKC D2(B2) [S] */
1767 /* Set Clock Comparator */
1768 check_privileged(s);
1769 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1770 tmp = get_address(s, 0, b2, d2);
1771 potential_page_fault(s);
1772 gen_helper_sckc(cpu_env, tmp);
1773 tcg_temp_free_i64(tmp);
1774 break;
1775 case 0x07: /* STCKC D2(B2) [S] */
1776 /* Store Clock Comparator */
1777 check_privileged(s);
1778 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1779 tmp = get_address(s, 0, b2, d2);
1780 potential_page_fault(s);
1781 gen_helper_stckc(cpu_env, tmp);
1782 tcg_temp_free_i64(tmp);
1783 break;
1784 case 0x08: /* SPT D2(B2) [S] */
1785 /* Set CPU Timer */
1786 check_privileged(s);
1787 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1788 tmp = get_address(s, 0, b2, d2);
1789 potential_page_fault(s);
1790 gen_helper_spt(cpu_env, tmp);
1791 tcg_temp_free_i64(tmp);
1792 break;
1793 case 0x09: /* STPT D2(B2) [S] */
1794 /* Store CPU Timer */
1795 check_privileged(s);
1796 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1797 tmp = get_address(s, 0, b2, d2);
1798 potential_page_fault(s);
1799 gen_helper_stpt(cpu_env, tmp);
1800 tcg_temp_free_i64(tmp);
1801 break;
1802 case 0x0a: /* SPKA D2(B2) [S] */
1803 /* Set PSW Key from Address */
1804 check_privileged(s);
1805 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1806 tmp = get_address(s, 0, b2, d2);
1807 tmp2 = tcg_temp_new_i64();
1808 tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
1809 tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
1810 tcg_gen_or_i64(psw_mask, tmp2, tmp);
1811 tcg_temp_free_i64(tmp2);
1812 tcg_temp_free_i64(tmp);
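        /* Architecturally, SPKA takes the new key from bits 56-59 of the
           computed address, i.e. (addr >> 4) & 0xf; the shift above moves
           those four bits straight into the PSW key field. */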
1813 break;
1814 case 0x0d: /* PTLB [S] */
1815 /* Purge TLB */
1816 check_privileged(s);
1817 gen_helper_ptlb(cpu_env);
1818 break;
1819 case 0x10: /* SPX D2(B2) [S] */
1820 /* Set Prefix Register */
1821 check_privileged(s);
1822 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1823 tmp = get_address(s, 0, b2, d2);
1824 potential_page_fault(s);
1825 gen_helper_spx(cpu_env, tmp);
1826 tcg_temp_free_i64(tmp);
1827 break;
1828 case 0x11: /* STPX D2(B2) [S] */
1829 /* Store Prefix */
1830 check_privileged(s);
1831 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1832 tmp = get_address(s, 0, b2, d2);
1833 tmp2 = tcg_temp_new_i64();
1834 tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
1835 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1836 tcg_temp_free_i64(tmp);
1837 tcg_temp_free_i64(tmp2);
1838 break;
1839 case 0x12: /* STAP D2(B2) [S] */
1840 /* Store CPU Address */
1841 check_privileged(s);
1842 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1843 tmp = get_address(s, 0, b2, d2);
1844 tmp2 = tcg_temp_new_i64();
1845 tmp32_1 = tcg_temp_new_i32();
1846 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
1847 tcg_gen_extu_i32_i64(tmp2, tmp32_1);
1848 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1849 tcg_temp_free_i64(tmp);
1850 tcg_temp_free_i64(tmp2);
1851 tcg_temp_free_i32(tmp32_1);
1852 break;
1853 case 0x21: /* IPTE R1,R2 [RRE] */
1854 /* Invalidate PTE */
1855 check_privileged(s);
1856 r1 = (insn >> 4) & 0xf;
1857 r2 = insn & 0xf;
1858 tmp = load_reg(r1);
1859 tmp2 = load_reg(r2);
1860 gen_helper_ipte(cpu_env, tmp, tmp2);
1861 tcg_temp_free_i64(tmp);
1862 tcg_temp_free_i64(tmp2);
1863 break;
1864 case 0x29: /* ISKE R1,R2 [RRE] */
1865 /* Insert Storage Key Extended */
1866 check_privileged(s);
1867 r1 = (insn >> 4) & 0xf;
1868 r2 = insn & 0xf;
1869 tmp = load_reg(r2);
1870 tmp2 = tcg_temp_new_i64();
1871 gen_helper_iske(tmp2, cpu_env, tmp);
1872 store_reg(r1, tmp2);
1873 tcg_temp_free_i64(tmp);
1874 tcg_temp_free_i64(tmp2);
1875 break;
1876 case 0x2a: /* RRBE R1,R2 [RRE] */
1877 /* Reset Reference Bit Extended */
1878 check_privileged(s);
1879 r1 = (insn >> 4) & 0xf;
1880 r2 = insn & 0xf;
1881 tmp32_1 = load_reg32(r1);
1882 tmp = load_reg(r2);
1883 gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
1884 set_cc_static(s);
1885 tcg_temp_free_i32(tmp32_1);
1886 tcg_temp_free_i64(tmp);
1887 break;
1888 case 0x2b: /* SSKE R1,R2 [RRE] */
1889 /* Set Storage Key Extended */
1890 check_privileged(s);
1891 r1 = (insn >> 4) & 0xf;
1892 r2 = insn & 0xf;
1893 tmp32_1 = load_reg32(r1);
1894 tmp = load_reg(r2);
1895 gen_helper_sske(cpu_env, tmp32_1, tmp);
1896 tcg_temp_free_i32(tmp32_1);
1897 tcg_temp_free_i64(tmp);
1898 break;
1899 case 0x34: /* STCH ? */
1900 /* Store Subchannel */
1901 check_privileged(s);
1902 gen_op_movi_cc(s, 3);
1903 break;
1904 case 0x46: /* STURA R1,R2 [RRE] */
1905 /* Store Using Real Address */
1906 check_privileged(s);
1907 r1 = (insn >> 4) & 0xf;
1908 r2 = insn & 0xf;
1909 tmp32_1 = load_reg32(r1);
1910 tmp = load_reg(r2);
1911 potential_page_fault(s);
1912 gen_helper_stura(cpu_env, tmp, tmp32_1);
1913 tcg_temp_free_i32(tmp32_1);
1914 tcg_temp_free_i64(tmp);
1915 break;
1916 case 0x50: /* CSP R1,R2 [RRE] */
1917 /* Compare And Swap And Purge */
1918 check_privileged(s);
1919 r1 = (insn >> 4) & 0xf;
1920 r2 = insn & 0xf;
1921 tmp32_1 = tcg_const_i32(r1);
1922 tmp32_2 = tcg_const_i32(r2);
1923 gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
1924 set_cc_static(s);
1925 tcg_temp_free_i32(tmp32_1);
1926 tcg_temp_free_i32(tmp32_2);
1927 break;
1928 case 0x5f: /* CHSC ? */
1929 /* Channel Subsystem Call */
1930 check_privileged(s);
1931 gen_op_movi_cc(s, 3);
1932 break;
1933 case 0x78: /* STCKE D2(B2) [S] */
1934 /* Store Clock Extended */
1935 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1936 tmp = get_address(s, 0, b2, d2);
1937 potential_page_fault(s);
1938 gen_helper_stcke(cc_op, cpu_env, tmp);
1939 set_cc_static(s);
1940 tcg_temp_free_i64(tmp);
1941 break;
1942 case 0x79: /* SACF D2(B2) [S] */
1943 /* Set Address Space Control Fast */
1944 check_privileged(s);
1945 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1946 tmp = get_address(s, 0, b2, d2);
1947 potential_page_fault(s);
1948 gen_helper_sacf(cpu_env, tmp);
1949 tcg_temp_free_i64(tmp);
1950 /* addressing mode has changed, so end the block */
1951 s->pc = s->next_pc;
1952 update_psw_addr(s);
1953 s->is_jmp = DISAS_JUMP;
1954 break;
1955 case 0x7d: /* STSI D2(B2) [S] */
1956 check_privileged(s);
1957 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1958 tmp = get_address(s, 0, b2, d2);
1959 tmp32_1 = load_reg32(0);
1960 tmp32_2 = load_reg32(1);
1961 potential_page_fault(s);
1962 gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
1963 set_cc_static(s);
1964 tcg_temp_free_i64(tmp);
1965 tcg_temp_free_i32(tmp32_1);
1966 tcg_temp_free_i32(tmp32_2);
1967 break;
1968 case 0x9d: /* LFPC D2(B2) [S] */
1969 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1970 tmp = get_address(s, 0, b2, d2);
1971 tmp2 = tcg_temp_new_i64();
1972 tmp32_1 = tcg_temp_new_i32();
1973 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
1974 tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
1975 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1976 tcg_temp_free_i64(tmp);
1977 tcg_temp_free_i64(tmp2);
1978 tcg_temp_free_i32(tmp32_1);
1979 break;
1980 case 0xb1: /* STFL D2(B2) [S] */
1981 /* Store Facility List (CPU features) at 200 */
1982 check_privileged(s);
1983 tmp2 = tcg_const_i64(0xc0000000);
1984 tmp = tcg_const_i64(200);
1985 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1986 tcg_temp_free_i64(tmp2);
1987 tcg_temp_free_i64(tmp);
1988 break;
1989 case 0xb2: /* LPSWE D2(B2) [S] */
1990 /* Load PSW Extended */
1991 check_privileged(s);
1992 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1993 tmp = get_address(s, 0, b2, d2);
1994 tmp2 = tcg_temp_new_i64();
1995 tmp3 = tcg_temp_new_i64();
1996 tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
1997 tcg_gen_addi_i64(tmp, tmp, 8);
1998 tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
1999 gen_helper_load_psw(cpu_env, tmp2, tmp3);
2000 /* we need to keep cc_op intact */
2001 s->is_jmp = DISAS_JUMP;
2002 tcg_temp_free_i64(tmp);
2003 tcg_temp_free_i64(tmp2);
2004 tcg_temp_free_i64(tmp3);
2005 break;
2006 case 0x20: /* SERVC R1,R2 [RRE] */
2007 /* SCLP Service call (PV hypercall) */
2008 check_privileged(s);
2009 potential_page_fault(s);
2010 tmp32_1 = load_reg32(r2);
2011 tmp = load_reg(r1);
2012 gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
2013 set_cc_static(s);
2014 tcg_temp_free_i32(tmp32_1);
2015 tcg_temp_free_i64(tmp);
2016 break;
2017 #endif
2018 default:
2019 LOG_DISAS("illegal b2 operation 0x%x\n", op);
2020 gen_illegal_opcode(s);
2021 break;
2025 static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
2026 int r1, int r2)
2028 TCGv_i64 tmp;
2029 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
2030 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
2031 #define FP_HELPER(i) \
2032 tmp32_1 = tcg_const_i32(r1); \
2033 tmp32_2 = tcg_const_i32(r2); \
2034 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
2035 tcg_temp_free_i32(tmp32_1); \
2036 tcg_temp_free_i32(tmp32_2);
2038 #define FP_HELPER_CC(i) \
2039 tmp32_1 = tcg_const_i32(r1); \
2040 tmp32_2 = tcg_const_i32(r2); \
2041 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
2042 set_cc_static(s); \
2043 tcg_temp_free_i32(tmp32_1); \
2044 tcg_temp_free_i32(tmp32_2);
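/* FP_HELPER expands to a plain helper call on (r1, r2); FP_HELPER_CC
   additionally routes the helper's cc result into cc_op and latches it
   with set_cc_static() for the FP ops that set a condition code. */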
2046 switch (op) {
2047 case 0x0: /* LPEBR R1,R2 [RRE] */
2048 FP_HELPER_CC(lpebr);
2049 break;
2050 case 0x2: /* LTEBR R1,R2 [RRE] */
2051 FP_HELPER_CC(ltebr);
2052 break;
2053 case 0x3: /* LCEBR R1,R2 [RRE] */
2054 FP_HELPER_CC(lcebr);
2055 break;
2056 case 0x4: /* LDEBR R1,R2 [RRE] */
2057 FP_HELPER(ldebr);
2058 break;
2059 case 0x5: /* LXDBR R1,R2 [RRE] */
2060 FP_HELPER(lxdbr);
2061 break;
2062 case 0x9: /* CEBR R1,R2 [RRE] */
2063 FP_HELPER_CC(cebr);
2064 break;
2065 case 0xa: /* AEBR R1,R2 [RRE] */
2066 FP_HELPER_CC(aebr);
2067 break;
2068 case 0xb: /* SEBR R1,R2 [RRE] */
2069 FP_HELPER_CC(sebr);
2070 break;
2071 case 0xd: /* DEBR R1,R2 [RRE] */
2072 FP_HELPER(debr);
2073 break;
2074 case 0x10: /* LPDBR R1,R2 [RRE] */
2075 FP_HELPER_CC(lpdbr);
2076 break;
2077 case 0x12: /* LTDBR R1,R2 [RRE] */
2078 FP_HELPER_CC(ltdbr);
2079 break;
2080 case 0x13: /* LCDBR R1,R2 [RRE] */
2081 FP_HELPER_CC(lcdbr);
2082 break;
2083 case 0x15: /* SQDBR R1,R2 [RRE] */
2084 FP_HELPER(sqdbr);
2085 break;
2086 case 0x17: /* MEEBR R1,R2 [RRE] */
2087 FP_HELPER(meebr);
2088 break;
2089 case 0x19: /* CDBR R1,R2 [RRE] */
2090 FP_HELPER_CC(cdbr);
2091 break;
2092 case 0x1a: /* ADBR R1,R2 [RRE] */
2093 FP_HELPER_CC(adbr);
2094 break;
2095 case 0x1b: /* SDBR R1,R2 [RRE] */
2096 FP_HELPER_CC(sdbr);
2097 break;
2098 case 0x1c: /* MDBR R1,R2 [RRE] */
2099 FP_HELPER(mdbr);
2100 break;
2101 case 0x1d: /* DDBR R1,R2 [RRE] */
2102 FP_HELPER(ddbr);
2103 break;
2104 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
2105 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
2106 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
2107 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
2108 tmp32_1 = tcg_const_i32(m3);
2109 tmp32_2 = tcg_const_i32(r2);
2110 tmp32_3 = tcg_const_i32(r1);
2111 switch (op) {
2112 case 0xe:
2113 gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2114 break;
2115 case 0x1e:
2116 gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2117 break;
2118 case 0x1f:
2119 gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2120 break;
2121 default:
2122 tcg_abort();
2124 tcg_temp_free_i32(tmp32_1);
2125 tcg_temp_free_i32(tmp32_2);
2126 tcg_temp_free_i32(tmp32_3);
2127 break;
2128 case 0x40: /* LPXBR R1,R2 [RRE] */
2129 FP_HELPER_CC(lpxbr);
2130 break;
2131 case 0x42: /* LTXBR R1,R2 [RRE] */
2132 FP_HELPER_CC(ltxbr);
2133 break;
2134 case 0x43: /* LCXBR R1,R2 [RRE] */
2135 FP_HELPER_CC(lcxbr);
2136 break;
2137 case 0x44: /* LEDBR R1,R2 [RRE] */
2138 FP_HELPER(ledbr);
2139 break;
2140 case 0x45: /* LDXBR R1,R2 [RRE] */
2141 FP_HELPER(ldxbr);
2142 break;
2143 case 0x46: /* LEXBR R1,R2 [RRE] */
2144 FP_HELPER(lexbr);
2145 break;
2146 case 0x49: /* CXBR R1,R2 [RRE] */
2147 FP_HELPER_CC(cxbr);
2148 break;
2149 case 0x4a: /* AXBR R1,R2 [RRE] */
2150 FP_HELPER_CC(axbr);
2151 break;
2152 case 0x4b: /* SXBR R1,R2 [RRE] */
2153 FP_HELPER_CC(sxbr);
2154 break;
2155 case 0x4c: /* MXBR R1,R2 [RRE] */
2156 FP_HELPER(mxbr);
2157 break;
2158 case 0x4d: /* DXBR R1,R2 [RRE] */
2159 FP_HELPER(dxbr);
2160 break;
2161 case 0x65: /* LXR R1,R2 [RRE] */
2162 tmp = load_freg(r2);
2163 store_freg(r1, tmp);
2164 tcg_temp_free_i64(tmp);
2165 tmp = load_freg(r2 + 2);
2166 store_freg(r1 + 2, tmp);
2167 tcg_temp_free_i64(tmp);
2168 break;
2169 case 0x74: /* LZER R1 [RRE] */
2170 tmp32_1 = tcg_const_i32(r1);
2171 gen_helper_lzer(cpu_env, tmp32_1);
2172 tcg_temp_free_i32(tmp32_1);
2173 break;
2174 case 0x75: /* LZDR R1 [RRE] */
2175 tmp32_1 = tcg_const_i32(r1);
2176 gen_helper_lzdr(cpu_env, tmp32_1);
2177 tcg_temp_free_i32(tmp32_1);
2178 break;
2179 case 0x76: /* LZXR R1 [RRE] */
2180 tmp32_1 = tcg_const_i32(r1);
2181 gen_helper_lzxr(cpu_env, tmp32_1);
2182 tcg_temp_free_i32(tmp32_1);
2183 break;
2184 case 0x84: /* SFPC R1 [RRE] */
2185 tmp32_1 = load_reg32(r1);
2186 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2187 tcg_temp_free_i32(tmp32_1);
2188 break;
2189 case 0x8c: /* EFPC R1 [RRE] */
2190 tmp32_1 = tcg_temp_new_i32();
2191 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2192 store_reg32(r1, tmp32_1);
2193 tcg_temp_free_i32(tmp32_1);
2194 break;
2195 case 0x94: /* CEFBR R1,R2 [RRE] */
2196 case 0x95: /* CDFBR R1,R2 [RRE] */
2197 case 0x96: /* CXFBR R1,R2 [RRE] */
2198 tmp32_1 = tcg_const_i32(r1);
2199 tmp32_2 = load_reg32(r2);
2200 switch (op) {
2201 case 0x94:
2202 gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
2203 break;
2204 case 0x95:
2205 gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
2206 break;
2207 case 0x96:
2208 gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
2209 break;
2210 default:
2211 tcg_abort();
2213 tcg_temp_free_i32(tmp32_1);
2214 tcg_temp_free_i32(tmp32_2);
2215 break;
2216 case 0x98: /* CFEBR R1,R2 [RRE] */
2217 case 0x99: /* CFDBR R1,R2 [RRE] */
2218 case 0x9a: /* CFXBR R1,R2 [RRE] */
2219 tmp32_1 = tcg_const_i32(r1);
2220 tmp32_2 = tcg_const_i32(r2);
2221 tmp32_3 = tcg_const_i32(m3);
2222 switch (op) {
2223 case 0x98:
2224 gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2225 break;
2226 case 0x99:
2227 gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2228 break;
2229 case 0x9a:
2230 gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2231 break;
2232 default:
2233 tcg_abort();
2235 set_cc_static(s);
2236 tcg_temp_free_i32(tmp32_1);
2237 tcg_temp_free_i32(tmp32_2);
2238 tcg_temp_free_i32(tmp32_3);
2239 break;
2240 case 0xa4: /* CEGBR R1,R2 [RRE] */
2241 case 0xa5: /* CDGBR R1,R2 [RRE] */
2242 tmp32_1 = tcg_const_i32(r1);
2243 tmp = load_reg(r2);
2244 switch (op) {
2245 case 0xa4:
2246 gen_helper_cegbr(cpu_env, tmp32_1, tmp);
2247 break;
2248 case 0xa5:
2249 gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
2250 break;
2251 default:
2252 tcg_abort();
2254 tcg_temp_free_i32(tmp32_1);
2255 tcg_temp_free_i64(tmp);
2256 break;
2257 case 0xa6: /* CXGBR R1,R2 [RRE] */
2258 tmp32_1 = tcg_const_i32(r1);
2259 tmp = load_reg(r2);
2260 gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
2261 tcg_temp_free_i32(tmp32_1);
2262 tcg_temp_free_i64(tmp);
2263 break;
2264 case 0xa8: /* CGEBR R1,R2 [RRE] */
2265 tmp32_1 = tcg_const_i32(r1);
2266 tmp32_2 = tcg_const_i32(r2);
2267 tmp32_3 = tcg_const_i32(m3);
2268 gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2269 set_cc_static(s);
2270 tcg_temp_free_i32(tmp32_1);
2271 tcg_temp_free_i32(tmp32_2);
2272 tcg_temp_free_i32(tmp32_3);
2273 break;
2274 case 0xa9: /* CGDBR R1,R2 [RRE] */
2275 tmp32_1 = tcg_const_i32(r1);
2276 tmp32_2 = tcg_const_i32(r2);
2277 tmp32_3 = tcg_const_i32(m3);
2278 gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2279 set_cc_static(s);
2280 tcg_temp_free_i32(tmp32_1);
2281 tcg_temp_free_i32(tmp32_2);
2282 tcg_temp_free_i32(tmp32_3);
2283 break;
2284 case 0xaa: /* CGXBR R1,R2 [RRE] */
2285 tmp32_1 = tcg_const_i32(r1);
2286 tmp32_2 = tcg_const_i32(r2);
2287 tmp32_3 = tcg_const_i32(m3);
2288 gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2289 set_cc_static(s);
2290 tcg_temp_free_i32(tmp32_1);
2291 tcg_temp_free_i32(tmp32_2);
2292 tcg_temp_free_i32(tmp32_3);
2293 break;
2294 default:
2295 LOG_DISAS("illegal b3 operation 0x%x\n", op);
2296 gen_illegal_opcode(s);
2297 break;
2300 #undef FP_HELPER_CC
2301 #undef FP_HELPER
2304 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
2305 int r2)
2307 TCGv_i64 tmp;
2308 TCGv_i32 tmp32_1;
2310 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
2311 switch (op) {
2312 case 0x17: /* LLGTR R1,R2 [RRE] */
2313 tmp32_1 = load_reg32(r2);
2314 tmp = tcg_temp_new_i64();
2315 tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
2316 tcg_gen_extu_i32_i64(tmp, tmp32_1);
2317 store_reg(r1, tmp);
2318 tcg_temp_free_i32(tmp32_1);
2319 tcg_temp_free_i64(tmp);
2320 break;
2321 case 0x0f: /* LRVGR R1,R2 [RRE] */
2322 tcg_gen_bswap64_i64(regs[r1], regs[r2]);
2323 break;
2324 case 0x1f: /* LRVR R1,R2 [RRE] */
2325 tmp32_1 = load_reg32(r2);
2326 tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
2327 store_reg32(r1, tmp32_1);
2328 tcg_temp_free_i32(tmp32_1);
2329 break;
2330 case 0x83: /* FLOGR R1,R2 [RRE] */
2331 tmp = load_reg(r2);
2332 tmp32_1 = tcg_const_i32(r1);
2333 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
2334 set_cc_static(s);
2335 tcg_temp_free_i64(tmp);
2336 tcg_temp_free_i32(tmp32_1);
2337 break;
2338 default:
2339 LOG_DISAS("illegal b9 operation 0x%x\n", op);
2340 gen_illegal_opcode(s);
2341 break;
2345 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
2347 TCGv_i64 tmp, tmp2, tmp3, tmp4;
2348 TCGv_i32 tmp32_1, tmp32_2;
2349 unsigned char opc;
2350 uint64_t insn;
2351 int op, r1, r2, r3, d1, d2, x2, b1, b2, i, i2, r1b;
2352 TCGv_i32 vl;
2354 opc = cpu_ldub_code(env, s->pc);
2355 LOG_DISAS("opc 0x%x\n", opc);
2357 switch (opc) {
2358 #ifndef CONFIG_USER_ONLY
2359 case 0x82: /* LPSW D2(B2) [S] */
2360 /* Load PSW */
2361 check_privileged(s);
2362 insn = ld_code4(env, s->pc);
2363 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2364 tmp = get_address(s, 0, b2, d2);
2365 tmp2 = tcg_temp_new_i64();
2366 tmp3 = tcg_temp_new_i64();
2367 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
2368 tcg_gen_addi_i64(tmp, tmp, 4);
2369 tcg_gen_qemu_ld32u(tmp3, tmp, get_mem_index(s));
2370 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2371 tcg_gen_shli_i64(tmp2, tmp2, 32);
2372 gen_helper_load_psw(cpu_env, tmp2, tmp3);
2373 tcg_temp_free_i64(tmp);
2374 tcg_temp_free_i64(tmp2);
2375 tcg_temp_free_i64(tmp3);
2376 /* we need to keep cc_op intact */
2377 s->is_jmp = DISAS_JUMP;
2378 break;
2379 case 0x83: /* DIAG R1,R3,D2 [RS] */
2380 /* Diagnose call (KVM hypercall) */
2381 check_privileged(s);
2382 potential_page_fault(s);
2383 insn = ld_code4(env, s->pc);
2384 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2385 tmp32_1 = tcg_const_i32(insn & 0xfff);
2386 tmp2 = load_reg(2);
2387 tmp3 = load_reg(1);
2388 gen_helper_diag(tmp2, cpu_env, tmp32_1, tmp2, tmp3);
2389 store_reg(2, tmp2);
2390 tcg_temp_free_i32(tmp32_1);
2391 tcg_temp_free_i64(tmp2);
2392 tcg_temp_free_i64(tmp3);
2393 break;
2394 #endif
2395 case 0x88: /* SRL R1,D2(B2) [RS] */
2396 case 0x89: /* SLL R1,D2(B2) [RS] */
2397 case 0x8a: /* SRA R1,D2(B2) [RS] */
2398 insn = ld_code4(env, s->pc);
2399 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2400 tmp = get_address(s, 0, b2, d2);
2401 tmp32_1 = load_reg32(r1);
2402 tmp32_2 = tcg_temp_new_i32();
2403 tcg_gen_trunc_i64_i32(tmp32_2, tmp);
2404 tcg_gen_andi_i32(tmp32_2, tmp32_2, 0x3f);
2405 switch (opc) {
2406 case 0x88:
2407 tcg_gen_shr_i32(tmp32_1, tmp32_1, tmp32_2);
2408 break;
2409 case 0x89:
2410 tcg_gen_shl_i32(tmp32_1, tmp32_1, tmp32_2);
2411 break;
2412 case 0x8a:
2413 tcg_gen_sar_i32(tmp32_1, tmp32_1, tmp32_2);
2414 set_cc_s32(s, tmp32_1);
2415 break;
2416 default:
2417 tcg_abort();
2419 store_reg32(r1, tmp32_1);
2420 tcg_temp_free_i64(tmp);
2421 tcg_temp_free_i32(tmp32_1);
2422 tcg_temp_free_i32(tmp32_2);
2423 break;
2424 case 0x8c: /* SRDL R1,D2(B2) [RS] */
2425 case 0x8d: /* SLDL R1,D2(B2) [RS] */
2426 case 0x8e: /* SRDA R1,D2(B2) [RS] */
2427 insn = ld_code4(env, s->pc);
2428 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2429 tmp = get_address(s, 0, b2, d2); /* shift */
2430 tmp2 = tcg_temp_new_i64();
2431 tmp32_1 = load_reg32(r1);
2432 tmp32_2 = load_reg32(r1 + 1);
2433 tcg_gen_concat_i32_i64(tmp2, tmp32_2, tmp32_1); /* operand */
2434 switch (opc) {
2435 case 0x8c:
2436 tcg_gen_shr_i64(tmp2, tmp2, tmp);
2437 break;
2438 case 0x8d:
2439 tcg_gen_shl_i64(tmp2, tmp2, tmp);
2440 break;
2441 case 0x8e:
2442 tcg_gen_sar_i64(tmp2, tmp2, tmp);
2443 set_cc_s64(s, tmp2);
2444 break;
2446 tcg_gen_shri_i64(tmp, tmp2, 32); /* split the 64-bit result back into the even/odd pair */
2447 tcg_gen_trunc_i64_i32(tmp32_1, tmp);
2448 store_reg32(r1, tmp32_1);
2449 tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
2450 store_reg32(r1 + 1, tmp32_2);
2451 tcg_temp_free_i64(tmp);
2452 tcg_temp_free_i64(tmp2);
2453 break;
2454 case 0x98: /* LM R1,R3,D2(B2) [RS] */
2455 case 0x90: /* STM R1,R3,D2(B2) [RS] */
2456 insn = ld_code4(env, s->pc);
2457 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2459 tmp = get_address(s, 0, b2, d2);
2460 tmp2 = tcg_temp_new_i64();
2461 tmp3 = tcg_const_i64(4);
2462 tmp4 = tcg_const_i64(0xffffffff00000000ULL);
2463 for (i = r1;; i = (i + 1) % 16) { /* register numbers wrap 15 -> 0 */
2464 if (opc == 0x98) {
2465 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
2466 tcg_gen_and_i64(regs[i], regs[i], tmp4); /* LM loads only the low word; preserve the high half */
2467 tcg_gen_or_i64(regs[i], regs[i], tmp2);
2468 } else {
2469 tcg_gen_qemu_st32(regs[i], tmp, get_mem_index(s));
2471 if (i == r3) {
2472 break;
2474 tcg_gen_add_i64(tmp, tmp, tmp3);
2476 tcg_temp_free_i64(tmp);
2477 tcg_temp_free_i64(tmp2);
2478 tcg_temp_free_i64(tmp3);
2479 tcg_temp_free_i64(tmp4);
2480 break;
2481 case 0x92: /* MVI D1(B1),I2 [SI] */
2482 insn = ld_code4(env, s->pc);
2483 tmp = decode_si(s, insn, &i2, &b1, &d1);
2484 tmp2 = tcg_const_i64(i2);
2485 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2486 tcg_temp_free_i64(tmp);
2487 tcg_temp_free_i64(tmp2);
2488 break;
2489 case 0x94: /* NI D1(B1),I2 [SI] */
2490 case 0x96: /* OI D1(B1),I2 [SI] */
2491 case 0x97: /* XI D1(B1),I2 [SI] */
2492 insn = ld_code4(env, s->pc);
2493 tmp = decode_si(s, insn, &i2, &b1, &d1);
2494 tmp2 = tcg_temp_new_i64();
2495 tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
2496 switch (opc) {
2497 case 0x94:
2498 tcg_gen_andi_i64(tmp2, tmp2, i2);
2499 break;
2500 case 0x96:
2501 tcg_gen_ori_i64(tmp2, tmp2, i2);
2502 break;
2503 case 0x97:
2504 tcg_gen_xori_i64(tmp2, tmp2, i2);
2505 break;
2506 default:
2507 tcg_abort();
2509 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2510 set_cc_nz_u64(s, tmp2);
2511 tcg_temp_free_i64(tmp);
2512 tcg_temp_free_i64(tmp2);
2513 break;
2514 case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
2515 insn = ld_code4(env, s->pc);
2516 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2517 tmp = get_address(s, 0, b2, d2);
2518 tmp32_1 = tcg_const_i32(r1);
2519 tmp32_2 = tcg_const_i32(r3);
2520 potential_page_fault(s);
2521 gen_helper_lam(cpu_env, tmp32_1, tmp, tmp32_2);
2522 tcg_temp_free_i64(tmp);
2523 tcg_temp_free_i32(tmp32_1);
2524 tcg_temp_free_i32(tmp32_2);
2525 break;
2526 case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
2527 insn = ld_code4(env, s->pc);
2528 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2529 tmp = get_address(s, 0, b2, d2);
2530 tmp32_1 = tcg_const_i32(r1);
2531 tmp32_2 = tcg_const_i32(r3);
2532 potential_page_fault(s);
2533 gen_helper_stam(cpu_env, tmp32_1, tmp, tmp32_2);
2534 tcg_temp_free_i64(tmp);
2535 tcg_temp_free_i32(tmp32_1);
2536 tcg_temp_free_i32(tmp32_2);
2537 break;
2538 case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
2539 insn = ld_code4(env, s->pc);
2540 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2541 tmp = get_address(s, 0, b2, d2);
2542 tmp32_1 = tcg_const_i32(r1);
2543 tmp32_2 = tcg_const_i32(r3);
2544 potential_page_fault(s);
2545 gen_helper_mvcle(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2546 set_cc_static(s);
2547 tcg_temp_free_i64(tmp);
2548 tcg_temp_free_i32(tmp32_1);
2549 tcg_temp_free_i32(tmp32_2);
2550 break;
2551 case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
2552 insn = ld_code4(env, s->pc);
2553 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2554 tmp = get_address(s, 0, b2, d2);
2555 tmp32_1 = tcg_const_i32(r1);
2556 tmp32_2 = tcg_const_i32(r3);
2557 potential_page_fault(s);
2558 gen_helper_clcle(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2559 set_cc_static(s);
2560 tcg_temp_free_i64(tmp);
2561 tcg_temp_free_i32(tmp32_1);
2562 tcg_temp_free_i32(tmp32_2);
2563 break;
2564 #ifndef CONFIG_USER_ONLY
2565 case 0xac: /* STNSM D1(B1),I2 [SI] */
2566 case 0xad: /* STOSM D1(B1),I2 [SI] */
2567 check_privileged(s);
2568 insn = ld_code4(env, s->pc);
2569 tmp = decode_si(s, insn, &i2, &b1, &d1);
2570 tmp2 = tcg_temp_new_i64();
2571 tcg_gen_shri_i64(tmp2, psw_mask, 56); /* current system mask (PSW byte 0) */
2572 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2573 if (opc == 0xac) {
2574 tcg_gen_andi_i64(psw_mask, psw_mask,
2575 ((uint64_t)i2 << 56) | 0x00ffffffffffffffULL);
2576 } else {
2577 tcg_gen_ori_i64(psw_mask, psw_mask, (uint64_t)i2 << 56);
2579 tcg_temp_free_i64(tmp);
2580 tcg_temp_free_i64(tmp2);
2581 break;
2582 case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
2583 check_privileged(s);
2584 insn = ld_code4(env, s->pc);
2585 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2586 tmp = get_address(s, 0, b2, d2);
2587 tmp2 = load_reg(r3);
2588 tmp32_1 = tcg_const_i32(r1);
2589 potential_page_fault(s);
2590 gen_helper_sigp(cc_op, cpu_env, tmp, tmp32_1, tmp2);
2591 set_cc_static(s);
2592 tcg_temp_free_i64(tmp);
2593 tcg_temp_free_i64(tmp2);
2594 tcg_temp_free_i32(tmp32_1);
2595 break;
2596 case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
2597 check_privileged(s);
2598 insn = ld_code4(env, s->pc);
2599 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2600 tmp32_1 = tcg_const_i32(r1);
2601 potential_page_fault(s);
2602 gen_helper_lra(cc_op, cpu_env, tmp, tmp32_1);
2603 set_cc_static(s);
2604 tcg_temp_free_i64(tmp);
2605 tcg_temp_free_i32(tmp32_1);
2606 break;
2607 #endif
2608 case 0xb2:
2609 insn = ld_code4(env, s->pc);
2610 op = (insn >> 16) & 0xff;
2611 switch (op) {
2612 case 0x9c: /* STFPC D2(B2) [S] */
2613 d2 = insn & 0xfff;
2614 b2 = (insn >> 12) & 0xf;
2615 tmp32_1 = tcg_temp_new_i32();
2616 tmp = tcg_temp_new_i64();
2617 tmp2 = get_address(s, 0, b2, d2);
2618 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2619 tcg_gen_extu_i32_i64(tmp, tmp32_1);
2620 tcg_gen_qemu_st32(tmp, tmp2, get_mem_index(s));
2621 tcg_temp_free_i32(tmp32_1);
2622 tcg_temp_free_i64(tmp);
2623 tcg_temp_free_i64(tmp2);
2624 break;
2625 default:
2626 disas_b2(env, s, op, insn);
2627 break;
2629 break;
2630 case 0xb3:
2631 insn = ld_code4(env, s->pc);
2632 op = (insn >> 16) & 0xff;
2633 r3 = (insn >> 12) & 0xf; /* aka m3 */
2634 r1 = (insn >> 4) & 0xf;
2635 r2 = insn & 0xf;
2636 disas_b3(env, s, op, r3, r1, r2);
2637 break;
2638 #ifndef CONFIG_USER_ONLY
2639 case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
2640 /* Store Control */
2641 check_privileged(s);
2642 insn = ld_code4(env, s->pc);
2643 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2644 tmp = get_address(s, 0, b2, d2);
2645 tmp32_1 = tcg_const_i32(r1);
2646 tmp32_2 = tcg_const_i32(r3);
2647 potential_page_fault(s);
2648 gen_helper_stctl(cpu_env, tmp32_1, tmp, tmp32_2);
2649 tcg_temp_free_i64(tmp);
2650 tcg_temp_free_i32(tmp32_1);
2651 tcg_temp_free_i32(tmp32_2);
2652 break;
2653 case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
2654 /* Load Control */
2655 check_privileged(s);
2656 insn = ld_code4(env, s->pc);
2657 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2658 tmp = get_address(s, 0, b2, d2);
2659 tmp32_1 = tcg_const_i32(r1);
2660 tmp32_2 = tcg_const_i32(r3);
2661 potential_page_fault(s);
2662 gen_helper_lctl(cpu_env, tmp32_1, tmp, tmp32_2);
2663 tcg_temp_free_i64(tmp);
2664 tcg_temp_free_i32(tmp32_1);
2665 tcg_temp_free_i32(tmp32_2);
2666 break;
2667 #endif
2668 case 0xb9:
2669 insn = ld_code4(env, s->pc);
2670 r1 = (insn >> 4) & 0xf;
2671 r2 = insn & 0xf;
2672 op = (insn >> 16) & 0xff;
2673 disas_b9(env, s, op, r1, r2);
2674 break;
2675 case 0xba: /* CS R1,R3,D2(B2) [RS] */
2676 insn = ld_code4(env, s->pc);
2677 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2678 tmp = get_address(s, 0, b2, d2);
2679 tmp32_1 = tcg_const_i32(r1);
2680 tmp32_2 = tcg_const_i32(r3);
2681 potential_page_fault(s);
2682 gen_helper_cs(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2683 set_cc_static(s);
2684 tcg_temp_free_i64(tmp);
2685 tcg_temp_free_i32(tmp32_1);
2686 tcg_temp_free_i32(tmp32_2);
2687 break;
2688 case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
2689 insn = ld_code4(env, s->pc);
2690 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2691 tmp = get_address(s, 0, b2, d2);
2692 tmp32_1 = load_reg32(r1);
2693 tmp32_2 = tcg_const_i32(r3);
2694 potential_page_fault(s);
2695 gen_helper_clm(cc_op, cpu_env, tmp32_1, tmp32_2, tmp);
2696 set_cc_static(s);
2697 tcg_temp_free_i64(tmp);
2698 tcg_temp_free_i32(tmp32_1);
2699 tcg_temp_free_i32(tmp32_2);
2700 break;
2701 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2702 insn = ld_code4(env, s->pc);
2703 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2704 tmp = get_address(s, 0, b2, d2);
2705 tmp32_1 = load_reg32(r1);
2706 tmp32_2 = tcg_const_i32(r3);
2707 potential_page_fault(s);
2708 gen_helper_stcm(cpu_env, tmp32_1, tmp32_2, tmp);
2709 tcg_temp_free_i64(tmp);
2710 tcg_temp_free_i32(tmp32_1);
2711 tcg_temp_free_i32(tmp32_2);
2712 break;
2713 case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */
2714 case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
2715 case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
2716 case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
2717 case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
2718 case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
2719 case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
2720 insn = ld_code6(env, s->pc);
2721 vl = tcg_const_i32((insn >> 32) & 0xff);
2722 b1 = (insn >> 28) & 0xf;
2723 b2 = (insn >> 12) & 0xf;
2724 d1 = (insn >> 16) & 0xfff;
2725 d2 = insn & 0xfff;
2726 tmp = get_address(s, 0, b1, d1);
2727 tmp2 = get_address(s, 0, b2, d2);
2728 switch (opc) {
2729 case 0xd2:
2730 gen_op_mvc(s, (insn >> 32) & 0xff, tmp, tmp2);
2731 break;
2732 case 0xd4:
2733 potential_page_fault(s);
2734 gen_helper_nc(cc_op, cpu_env, vl, tmp, tmp2);
2735 set_cc_static(s);
2736 break;
2737 case 0xd5:
2738 gen_op_clc(s, (insn >> 32) & 0xff, tmp, tmp2);
2739 break;
2740 case 0xd6:
2741 potential_page_fault(s);
2742 gen_helper_oc(cc_op, cpu_env, vl, tmp, tmp2);
2743 set_cc_static(s);
2744 break;
2745 case 0xd7:
2746 potential_page_fault(s);
2747 gen_helper_xc(cc_op, cpu_env, vl, tmp, tmp2);
2748 set_cc_static(s);
2749 break;
2750 case 0xdc:
2751 potential_page_fault(s);
2752 gen_helper_tr(cpu_env, vl, tmp, tmp2);
2753 set_cc_static(s);
2754 break;
2755 case 0xf3:
2756 potential_page_fault(s);
2757 gen_helper_unpk(cpu_env, vl, tmp, tmp2);
2758 break;
2759 default:
2760 tcg_abort();
2762 tcg_temp_free_i64(tmp);
2763 tcg_temp_free_i64(tmp2);
2764 break;
2765 #ifndef CONFIG_USER_ONLY
2766 case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
2767 case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
2768 check_privileged(s);
2769 potential_page_fault(s);
2770 insn = ld_code6(env, s->pc);
2771 r1 = (insn >> 36) & 0xf;
2772 r3 = (insn >> 32) & 0xf;
2773 b1 = (insn >> 28) & 0xf;
2774 d1 = (insn >> 16) & 0xfff;
2775 b2 = (insn >> 12) & 0xf;
2776 d2 = insn & 0xfff;
2777 tmp = load_reg(r1);
2778 /* XXX key in r3 */
2779 tmp2 = get_address(s, 0, b1, d1);
2780 tmp3 = get_address(s, 0, b2, d2);
2781 if (opc == 0xda) {
2782 gen_helper_mvcp(cc_op, cpu_env, tmp, tmp2, tmp3);
2783 } else {
2784 gen_helper_mvcs(cc_op, cpu_env, tmp, tmp2, tmp3);
2786 set_cc_static(s);
2787 tcg_temp_free_i64(tmp);
2788 tcg_temp_free_i64(tmp2);
2789 tcg_temp_free_i64(tmp3);
2790 break;
2791 #endif
2792 case 0xe3:
2793 insn = ld_code6(env, s->pc);
2794 debug_insn(insn);
2795 op = insn & 0xff;
2796 r1 = (insn >> 36) & 0xf;
2797 x2 = (insn >> 32) & 0xf;
2798 b2 = (insn >> 28) & 0xf;
2799 d2 = ((int)((((insn >> 16) & 0xfff)
2800 | ((insn << 4) & 0xff000)) << 12)) >> 12; /* DL | DH assembled, sign-extended from bit 19 */
2801 disas_e3(env, s, op, r1, x2, b2, d2);
2802 break;
2803 #ifndef CONFIG_USER_ONLY
2804 case 0xe5:
2805 /* Test Protection */
2806 check_privileged(s);
2807 insn = ld_code6(env, s->pc);
2808 debug_insn(insn);
2809 disas_e5(env, s, insn);
2810 break;
2811 #endif
2812 case 0xeb:
2813 insn = ld_code6(env, s->pc);
2814 debug_insn(insn);
2815 op = insn & 0xff;
2816 r1 = (insn >> 36) & 0xf;
2817 r3 = (insn >> 32) & 0xf;
2818 b2 = (insn >> 28) & 0xf;
2819 d2 = ((int)((((insn >> 16) & 0xfff)
2820 | ((insn << 4) & 0xff000)) << 12)) >> 12; /* DL | DH assembled, sign-extended from bit 19 */
2821 disas_eb(env, s, op, r1, r3, b2, d2);
2822 break;
2823 case 0xed:
2824 insn = ld_code6(env, s->pc);
2825 debug_insn(insn);
2826 op = insn & 0xff;
2827 r1 = (insn >> 36) & 0xf;
2828 x2 = (insn >> 32) & 0xf;
2829 b2 = (insn >> 28) & 0xf;
2830 d2 = (short)((insn >> 16) & 0xfff);
2831 r1b = (insn >> 12) & 0xf;
2832 disas_ed(env, s, op, r1, x2, b2, d2, r1b);
2833 break;
2834 default:
2835 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
2836 gen_illegal_opcode(s);
2837 break;
2841 /* ====================================================================== */
2842 /* Define the insn format enumeration. */
2843 #define F0(N) FMT_##N,
2844 #define F1(N, X1) F0(N)
2845 #define F2(N, X1, X2) F0(N)
2846 #define F3(N, X1, X2, X3) F0(N)
2847 #define F4(N, X1, X2, X3, X4) F0(N)
2848 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2850 typedef enum {
2851 #include "insn-format.def"
2852 } DisasFormat;
2854 #undef F0
2855 #undef F1
2856 #undef F2
2857 #undef F3
2858 #undef F4
2859 #undef F5
2861 /* Define a structure to hold the decoded fields.  We'll store each field
2862 in an array indexed by an enum.  In order to conserve memory, we'll
2863 arrange for fields that cannot exist at the same time to overlap, thus
2864 the "C" for compact.  For checking purposes there is also an "O" for the
2865 original index, which is applied to the availability bitmaps. */
2867 enum DisasFieldIndexO {
2868 FLD_O_r1,
2869 FLD_O_r2,
2870 FLD_O_r3,
2871 FLD_O_m1,
2872 FLD_O_m3,
2873 FLD_O_m4,
2874 FLD_O_b1,
2875 FLD_O_b2,
2876 FLD_O_b4,
2877 FLD_O_d1,
2878 FLD_O_d2,
2879 FLD_O_d4,
2880 FLD_O_x2,
2881 FLD_O_l1,
2882 FLD_O_l2,
2883 FLD_O_i1,
2884 FLD_O_i2,
2885 FLD_O_i3,
2886 FLD_O_i4,
2887 FLD_O_i5
2890 enum DisasFieldIndexC {
2891 FLD_C_r1 = 0,
2892 FLD_C_m1 = 0,
2893 FLD_C_b1 = 0,
2894 FLD_C_i1 = 0,
2896 FLD_C_r2 = 1,
2897 FLD_C_b2 = 1,
2898 FLD_C_i2 = 1,
2900 FLD_C_r3 = 2,
2901 FLD_C_m3 = 2,
2902 FLD_C_i3 = 2,
2904 FLD_C_m4 = 3,
2905 FLD_C_b4 = 3,
2906 FLD_C_i4 = 3,
2907 FLD_C_l1 = 3,
2909 FLD_C_i5 = 4,
2910 FLD_C_d1 = 4,
2912 FLD_C_d2 = 5,
2914 FLD_C_d4 = 6,
2915 FLD_C_x2 = 6,
2916 FLD_C_l2 = 6,
2918 NUM_C_FIELD = 7
2921 struct DisasFields {
2922 unsigned op:8;
2923 unsigned op2:8;
2924 unsigned presentC:16;
2925 unsigned int presentO;
2926 int c[NUM_C_FIELD];
2929 /* This is the way fields are to be accessed out of DisasFields. */
2930 #define have_field(S, F) have_field1((S), FLD_O_##F)
2931 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2933 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
2935 return (f->presentO >> c) & 1;
2938 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
2939 enum DisasFieldIndexC c)
2941 assert(have_field1(f, o));
2942 return f->c[c];
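/* Worked example: get_field(f, r3) expands to
   get_field1(f, FLD_O_r3, FLD_C_r3).  The O index (FLD_O_r3 == 2) is used
   only to test the presentO availability bitmap; the value itself is read
   from compact slot FLD_C_r3 == 2, which r3 shares with m3 and i3 because
   no instruction format carries more than one of those fields at once. */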
2945 /* Describe the layout of each field in each format. */
2946 typedef struct DisasField {
2947 unsigned int beg:8;
2948 unsigned int size:8;
2949 unsigned int type:2;
2950 unsigned int indexC:6;
2951 enum DisasFieldIndexO indexO:8;
2952 } DisasField;
2954 typedef struct DisasFormatInfo {
2955 DisasField op[NUM_C_FIELD];
2956 } DisasFormatInfo;
2958 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
2959 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
2960 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2961 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
2962 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2963 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2964 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
2965 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2966 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2967 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2968 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2969 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2970 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
2971 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
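/* As an illustration of the macros above (EXAMPLE is hypothetical and does
   not come from insn-format.def), a format declared as
   F2(EXAMPLE, R(1,8), BXD(2)) would produce the entry sketched below;
   unused slots of the NUM_C_FIELD array are zero-initialized. */
#if 0
static const DisasFormatInfo example_format = { {
    { 8, 4, 0, FLD_C_r1, FLD_O_r1 },   /* R(1,8): 4-bit register at bit 8 */
    { 16, 4, 0, FLD_C_b2, FLD_O_b2 },  /* BXD(2): 4-bit base at bit 16, */
    { 12, 4, 0, FLD_C_x2, FLD_O_x2 },  /*   4-bit index at bit 12, */
    { 20, 12, 0, FLD_C_d2, FLD_O_d2 }, /*   12-bit displacement at bit 20 */
} };
#endif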
2973 #define F0(N) { { } },
2974 #define F1(N, X1) { { X1 } },
2975 #define F2(N, X1, X2) { { X1, X2 } },
2976 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
2977 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
2978 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
2980 static const DisasFormatInfo format_info[] = {
2981 #include "insn-format.def"
2984 #undef F0
2985 #undef F1
2986 #undef F2
2987 #undef F3
2988 #undef F4
2989 #undef F5
2990 #undef R
2991 #undef M
2992 #undef BD
2993 #undef BXD
2994 #undef BDL
2995 #undef BXDL
2996 #undef I
2997 #undef L
2999 /* Generally, we'll extract operands into these structures, operate upon
3000 them, and store them back. See the "in1", "in2", "prep", "wout" sets
3001 of routines below for more details. */
3002 typedef struct {
3003 bool g_out, g_out2, g_in1, g_in2;
3004 TCGv_i64 out, out2, in1, in2;
3005 TCGv_i64 addr1;
3006 } DisasOps;
3008 /* Return values from translate_one, indicating the state of the TB. */
3009 typedef enum {
3010 /* Continue the TB. */
3011 NO_EXIT,
3012 /* We have emitted one or more goto_tb. No fixup required. */
3013 EXIT_GOTO_TB,
3014 /* We are not using a goto_tb (for whatever reason), but have updated
3015 the PC (for whatever reason), so there's no need to do it again on
3016 exiting the TB. */
3017 EXIT_PC_UPDATED,
3018 /* We are exiting the TB, but have neither emitted a goto_tb, nor
3019 updated the PC for the next instruction to be executed. */
3020 EXIT_PC_STALE,
3021 /* We are ending the TB with a noreturn function call, e.g. longjmp.
3022 No following code will be executed. */
3023 EXIT_NORETURN,
3024 } ExitStatus;
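/* A sketch of what each status obliges the caller to do before finishing
   the TB.  The real handling lives in the translator main loop, which is
   not part of this excerpt, and example_finish_tb is a hypothetical name
   used only for illustration. */
#if 0
static void example_finish_tb(DisasContext *s, ExitStatus status)
{
    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* Already chained via goto_tb, or control never returns. */
        break;
    case EXIT_PC_UPDATED:
        /* psw_addr is already correct; just leave the TB. */
        tcg_gen_exit_tb(0);
        break;
    case EXIT_PC_STALE:
        /* Point psw_addr at the next instruction first. */
        tcg_gen_movi_i64(psw_addr, s->next_pc);
        tcg_gen_exit_tb(0);
        break;
    case NO_EXIT:
        break;
    }
}
#endif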
3026 typedef enum DisasFacility {
3027 FAC_Z, /* zarch (default) */
3028 FAC_CASS, /* compare and swap and store */
3029 FAC_CASS2, /* compare and swap and store 2 */
3030 FAC_DFP, /* decimal floating point */
3031 FAC_DFPR, /* decimal floating point rounding */
3032 FAC_DO, /* distinct operands */
3033 FAC_EE, /* execute extensions */
3034 FAC_EI, /* extended immediate */
3035 FAC_FPE, /* floating point extension */
3036 FAC_FPSSH, /* floating point support sign handling */
3037 FAC_FPRGR, /* FPR-GR transfer */
3038 FAC_GIE, /* general instructions extension */
3039 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
3040 FAC_HW, /* high-word */
3041 FAC_IEEEE_SIM, /* IEEE exception simulation */
3042 FAC_LOC, /* load/store on condition */
3043 FAC_LD, /* long displacement */
3044 FAC_PC, /* population count */
3045 FAC_SCF, /* store clock fast */
3046 FAC_SFLE, /* store facility list extended */
3047 } DisasFacility;
3049 struct DisasInsn {
3050 unsigned opc:16;
3051 DisasFormat fmt:6;
3052 DisasFacility fac:6;
3054 const char *name;
3056 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
3057 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
3058 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
3059 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
3060 void (*help_cout)(DisasContext *, DisasOps *);
3061 ExitStatus (*help_op)(DisasContext *, DisasOps *);
3063 uint64_t data;
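/* The help_* hooks split each instruction into reusable stages, invoked
   roughly in this order: in1/in2 load the operands, prep selects or
   allocates the output temporaries, op performs the actual operation,
   wout writes the result back, and cout records the condition-code
   update.  The decode loop that strings them together is outside this
   excerpt; the point of the split is that any one stage can be shared
   verbatim between many instructions. */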
3066 /* ====================================================================== */
3067 /* Miscellaneous helpers, used by several operations. */
3069 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
3071 if (dest == s->next_pc) {
3072 return NO_EXIT;
3074 if (use_goto_tb(s, dest)) {
3075 gen_update_cc_op(s);
3076 tcg_gen_goto_tb(0);
3077 tcg_gen_movi_i64(psw_addr, dest);
3078 tcg_gen_exit_tb((tcg_target_long)s->tb);
3079 return EXIT_GOTO_TB;
3080 } else {
3081 tcg_gen_movi_i64(psw_addr, dest);
3082 return EXIT_PC_UPDATED;
3086 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
3087 bool is_imm, int imm, TCGv_i64 cdest)
3089 ExitStatus ret;
3090 uint64_t dest = s->pc + 2 * imm;
3091 int lab;
3093 /* Take care of the special cases first. */
3094 if (c->cond == TCG_COND_NEVER) {
3095 ret = NO_EXIT;
3096 goto egress;
3098 if (is_imm) {
3099 if (dest == s->next_pc) {
3100 /* Branch to next. */
3101 ret = NO_EXIT;
3102 goto egress;
3104 if (c->cond == TCG_COND_ALWAYS) {
3105 ret = help_goto_direct(s, dest);
3106 goto egress;
3108 } else {
3109 if (TCGV_IS_UNUSED_I64(cdest)) {
3110 /* E.g. bcr %r0 -> no branch. */
3111 ret = NO_EXIT;
3112 goto egress;
3114 if (c->cond == TCG_COND_ALWAYS) {
3115 tcg_gen_mov_i64(psw_addr, cdest);
3116 ret = EXIT_PC_UPDATED;
3117 goto egress;
3121 if (use_goto_tb(s, s->next_pc)) {
3122 if (is_imm && use_goto_tb(s, dest)) {
3123 /* Both exits can use goto_tb. */
3124 gen_update_cc_op(s);
3126 lab = gen_new_label();
3127 if (c->is_64) {
3128 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
3129 } else {
3130 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
3133 /* Branch not taken. */
3134 tcg_gen_goto_tb(0);
3135 tcg_gen_movi_i64(psw_addr, s->next_pc);
3136 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
3138 /* Branch taken. */
3139 gen_set_label(lab);
3140 tcg_gen_goto_tb(1);
3141 tcg_gen_movi_i64(psw_addr, dest);
3142 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
3144 ret = EXIT_GOTO_TB;
3145 } else {
3146 /* Fallthru can use goto_tb, but taken branch cannot. */
3147 /* Store taken branch destination before the brcond. This
3148 avoids having to allocate a new local temp to hold it.
3149 We'll overwrite this in the not taken case anyway. */
3150 if (!is_imm) {
3151 tcg_gen_mov_i64(psw_addr, cdest);
3154 lab = gen_new_label();
3155 if (c->is_64) {
3156 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
3157 } else {
3158 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
3161 /* Branch not taken. */
3162 gen_update_cc_op(s);
3163 tcg_gen_goto_tb(0);
3164 tcg_gen_movi_i64(psw_addr, s->next_pc);
3165 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
3167 gen_set_label(lab);
3168 if (is_imm) {
3169 tcg_gen_movi_i64(psw_addr, dest);
3171 ret = EXIT_PC_UPDATED;
3173 } else {
3174 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
3175 Most commonly we're single-stepping or some other condition that
3176 disables all use of goto_tb. Just update the PC and exit. */
3178 TCGv_i64 next = tcg_const_i64(s->next_pc);
3179 if (is_imm) {
3180 cdest = tcg_const_i64(dest);
3183 if (c->is_64) {
3184 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
3185 cdest, next);
3186 } else {
3187 TCGv_i32 t0 = tcg_temp_new_i32();
3188 TCGv_i64 t1 = tcg_temp_new_i64();
3189 TCGv_i64 z = tcg_const_i64(0);
3190 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
3191 tcg_gen_extu_i32_i64(t1, t0);
3192 tcg_temp_free_i32(t0);
3193 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
3194 tcg_temp_free_i64(t1);
3195 tcg_temp_free_i64(z);
3198 if (is_imm) {
3199 tcg_temp_free_i64(cdest);
3201 tcg_temp_free_i64(next);
3203 ret = EXIT_PC_UPDATED;
3206 egress:
3207 free_compare(c);
3208 return ret;
3211 /* ====================================================================== */
3212 /* The operations. These perform the bulk of the work for any insn,
3213 usually after the operands have been loaded and output initialized. */
3215 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
3217 gen_helper_abs_i64(o->out, o->in2);
3218 return NO_EXIT;
3221 static ExitStatus op_add(DisasContext *s, DisasOps *o)
3223 tcg_gen_add_i64(o->out, o->in1, o->in2);
3224 return NO_EXIT;
3227 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
3229 TCGv_i64 cc;
3231 tcg_gen_add_i64(o->out, o->in1, o->in2);
3233 /* XXX possible optimization point */
3234 gen_op_calc_cc(s);
3235 cc = tcg_temp_new_i64();
3236 tcg_gen_extu_i32_i64(cc, cc_op);
3237 tcg_gen_shri_i64(cc, cc, 1);
3239 tcg_gen_add_i64(o->out, o->out, cc);
3240 tcg_temp_free_i64(cc);
3241 return NO_EXIT;
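/* Why the shift by one works: for the unsigned-add CC ops the condition
   code is 0 (zero, no carry), 1 (nonzero, no carry), 2 (zero, carry) or
   3 (nonzero, carry), so bit 1 of the CC is exactly the carry-out of the
   previous addition.  A preceding add logical that carried out yields
   cc 2 or 3, making (cc >> 1) == 1; that single bit is what gets added
   in here. */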
3244 static ExitStatus op_and(DisasContext *s, DisasOps *o)
3246 tcg_gen_and_i64(o->out, o->in1, o->in2);
3247 return NO_EXIT;
3250 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
3252 int shift = s->insn->data & 0xff;
3253 int size = s->insn->data >> 8;
3254 uint64_t mask = ((1ull << size) - 1) << shift;
3256 assert(!o->g_in2);
3257 tcg_gen_shli_i64(o->in2, o->in2, shift);
3258 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3259 tcg_gen_and_i64(o->out, o->in1, o->in2);
3261 /* Produce the CC from only the bits manipulated. */
3262 tcg_gen_andi_i64(cc_dst, o->out, mask);
3263 set_cc_nz_u64(s, cc_dst);
3264 return NO_EXIT;
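/* The per-insn data word packs the immediate's position: bits 0-7 hold
   the shift and bits 8-15 the width.  For instance (an illustrative
   value, not quoted from insn-data.def), data == (16 << 8) | 32 would
   select a 16-bit immediate operating on bits 32-47, giving
   mask == 0xffff00000000.  op_ori and op_xori below decode the same
   packing. */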
3267 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
3269 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
3270 if (!TCGV_IS_UNUSED_I64(o->in2)) {
3271 tcg_gen_mov_i64(psw_addr, o->in2);
3272 return EXIT_PC_UPDATED;
3273 } else {
3274 return NO_EXIT;
3278 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
3280 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
3281 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
3284 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
3286 int m1 = get_field(s->fields, m1);
3287 bool is_imm = have_field(s->fields, i2);
3288 int imm = is_imm ? get_field(s->fields, i2) : 0;
3289 DisasCompare c;
3291 disas_jcc(s, &c, m1);
3292 return help_branch(s, &c, is_imm, imm, o->in2);
3295 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
3297 int r1 = get_field(s->fields, r1);
3298 bool is_imm = have_field(s->fields, i2);
3299 int imm = is_imm ? get_field(s->fields, i2) : 0;
3300 DisasCompare c;
3301 TCGv_i64 t;
3303 c.cond = TCG_COND_NE;
3304 c.is_64 = false;
3305 c.g1 = false;
3306 c.g2 = false;
3308 t = tcg_temp_new_i64();
3309 tcg_gen_subi_i64(t, regs[r1], 1);
3310 store_reg32_i64(r1, t);
3311 c.u.s32.a = tcg_temp_new_i32();
3312 c.u.s32.b = tcg_const_i32(0);
3313 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
3314 tcg_temp_free_i64(t);
3316 return help_branch(s, &c, is_imm, imm, o->in2);
3319 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
3321 int r1 = get_field(s->fields, r1);
3322 bool is_imm = have_field(s->fields, i2);
3323 int imm = is_imm ? get_field(s->fields, i2) : 0;
3324 DisasCompare c;
3326 c.cond = TCG_COND_NE;
3327 c.is_64 = true;
3328 c.g1 = true;
3329 c.g2 = false;
3331 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
3332 c.u.s64.a = regs[r1];
3333 c.u.s64.b = tcg_const_i64(0);
3335 return help_branch(s, &c, is_imm, imm, o->in2);
3338 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
3340 TCGv_i64 t1 = tcg_temp_new_i64();
3341 TCGv_i32 t2 = tcg_temp_new_i32();
3342 tcg_gen_trunc_i64_i32(t2, o->in1);
3343 gen_helper_cvd(t1, t2);
3344 tcg_temp_free_i32(t2);
3345 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
3346 tcg_temp_free_i64(t1);
3347 return NO_EXIT;
3350 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
3352 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
3353 return_low128(o->out);
3354 return NO_EXIT;
3357 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
3359 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
3360 return_low128(o->out);
3361 return NO_EXIT;
3364 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
3366 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
3367 return_low128(o->out);
3368 return NO_EXIT;
3371 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
3373 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
3374 return_low128(o->out);
3375 return NO_EXIT;
3378 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
3380 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
3381 tb->flags, (ab)use the tb->cs_base field as the address of
3382 the template in memory, and grab 8 bits of tb->flags/cflags for
3383 the contents of the register. We would then recognize all this
3384 in gen_intermediate_code_internal, generating code for exactly
3385 one instruction. This new TB then gets executed normally.
3387 On the other hand, this seems to be mostly used for modifying
3388 MVC inside of memcpy, which needs a helper call anyway. So
3389 perhaps this doesn't bear thinking about any further. */
3391 TCGv_i64 tmp;
3393 update_psw_addr(s);
3394 gen_op_calc_cc(s);
3396 tmp = tcg_const_i64(s->next_pc);
3397 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
3398 tcg_temp_free_i64(tmp);
3400 set_cc_static(s);
3401 return NO_EXIT;
3404 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
3406 int m3 = get_field(s->fields, m3);
3407 int pos, len, base = s->insn->data;
3408 TCGv_i64 tmp = tcg_temp_new_i64();
3409 uint64_t ccm;
3411 switch (m3) {
3412 case 0xf:
3413 /* Effectively a 32-bit load. */
3414 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
3415 len = 32;
3416 goto one_insert;
3418 case 0xc:
3419 case 0x6:
3420 case 0x3:
3421 /* Effectively a 16-bit load. */
3422 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
3423 len = 16;
3424 goto one_insert;
3426 case 0x8:
3427 case 0x4:
3428 case 0x2:
3429 case 0x1:
3430 /* Effectively an 8-bit load. */
3431 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
3432 len = 8;
3433 goto one_insert;
3435 one_insert:
3436 pos = base + ctz32(m3) * 8;
3437 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
3438 ccm = ((1ull << len) - 1) << pos;
3439 break;
3441 default:
3442 /* This is going to be a sequence of loads and inserts. */
3443 pos = base + 32 - 8;
3444 ccm = 0;
3445 while (m3) {
3446 if (m3 & 0x8) {
3447 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
3448 tcg_gen_addi_i64(o->in2, o->in2, 1);
3449 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
3450 ccm |= 0xffull << pos; /* 64-bit constant: pos can exceed 31 when base is 32 */
3452 m3 = (m3 << 1) & 0xf;
3453 pos -= 8;
3455 break;
3458 tcg_gen_movi_i64(tmp, ccm);
3459 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
3460 tcg_temp_free_i64(tmp);
3461 return NO_EXIT;
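/* Worked example: for ICM (base == 0) with m3 == 0x6, the switch above
   takes the 16-bit-load path: ctz32(0x6) == 1, so pos == 8 and len == 16,
   i.e. the two fetched bytes land in bits 8-23 of the register, and
   ccm == 0xffff00 marks exactly those bits for the CC computation. */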
3464 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
3466 int shift = s->insn->data & 0xff;
3467 int size = s->insn->data >> 8;
3468 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
3469 return NO_EXIT;
3472 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
3474 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
3475 return NO_EXIT;
3478 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
3480 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
3481 return NO_EXIT;
3484 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
3486 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
3487 return NO_EXIT;
3490 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
3492 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
3493 return NO_EXIT;
3496 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
3498 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
3499 return NO_EXIT;
3502 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
3504 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
3505 return NO_EXIT;
3508 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
3510 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
3511 return NO_EXIT;
3514 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
3516 o->out = o->in2;
3517 o->g_out = o->g_in2;
3518 TCGV_UNUSED_I64(o->in2);
3519 o->g_in2 = false;
3520 return NO_EXIT;
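/* op_mov2 does no copying at all: it steals in2 as the output value and
   marks o->in2 unused so the common cleanup code (outside this excerpt)
   frees the temporary exactly once.  The g_* flag travels with the value
   because a TCG global, unlike a temporary, must never be freed. */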
3523 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3525 o->out = o->in1;
3526 o->out2 = o->in2;
3527 o->g_out = o->g_in1;
3528 o->g_out2 = o->g_in2;
3529 TCGV_UNUSED_I64(o->in1);
3530 TCGV_UNUSED_I64(o->in2);
3531 o->g_in1 = o->g_in2 = false;
3532 return NO_EXIT;
3535 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3537 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3538 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
3539 potential_page_fault(s);
3540 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
3541 tcg_temp_free_i32(r1);
3542 tcg_temp_free_i32(r2);
3543 set_cc_static(s);
3544 return NO_EXIT;
3547 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3549 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3550 return NO_EXIT;
3553 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3555 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
3556 return_low128(o->out2);
3557 return NO_EXIT;
3560 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3562 gen_helper_nabs_i64(o->out, o->in2);
3563 return NO_EXIT;
3566 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3568 tcg_gen_neg_i64(o->out, o->in2);
3569 return NO_EXIT;
3572 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3574 tcg_gen_or_i64(o->out, o->in1, o->in2);
3575 return NO_EXIT;
3578 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3580 int shift = s->insn->data & 0xff;
3581 int size = s->insn->data >> 8;
3582 uint64_t mask = ((1ull << size) - 1) << shift;
3584 assert(!o->g_in2);
3585 tcg_gen_shli_i64(o->in2, o->in2, shift);
3586 tcg_gen_or_i64(o->out, o->in1, o->in2);
3588 /* Produce the CC from only the bits manipulated. */
3589 tcg_gen_andi_i64(cc_dst, o->out, mask);
3590 set_cc_nz_u64(s, cc_dst);
3591 return NO_EXIT;
3594 #ifndef CONFIG_USER_ONLY
3595 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3597 check_privileged(s);
3598 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3599 return NO_EXIT;
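/* SSM replaces only the system-mask byte, which is the most significant
   byte of the PSW mask: deposit at position 56, length 8.  E.g. with
   psw_mask == 0x0000000180000000 and a fetched byte of 0x04, the result
   is 0x0400000180000000; the remaining 56 bits are untouched. */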
3601 #endif
3603 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3605 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3606 return NO_EXIT;
3609 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3611 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3612 return NO_EXIT;
3615 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3617 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3618 return NO_EXIT;
3621 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3623 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3624 return NO_EXIT;
3627 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3629 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3630 return NO_EXIT;
3633 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3635 TCGv_i64 cc;
3637 assert(!o->g_in2);
3638 tcg_gen_not_i64(o->in2, o->in2);
3639 tcg_gen_add_i64(o->out, o->in1, o->in2);
3641 /* XXX possible optimization point */
3642 gen_op_calc_cc(s);
3643 cc = tcg_temp_new_i64();
3644 tcg_gen_extu_i32_i64(cc, cc_op);
3645 tcg_gen_shri_i64(cc, cc, 1);
3646 tcg_gen_add_i64(o->out, o->out, cc);
3647 tcg_temp_free_i64(cc);
3648 return NO_EXIT;
3651 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3653 TCGv_i32 t;
3655 update_psw_addr(s);
3656 gen_op_calc_cc(s);
3658 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3659 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3660 tcg_temp_free_i32(t);
3662 t = tcg_const_i32(s->next_pc - s->pc);
3663 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3664 tcg_temp_free_i32(t);
3666 gen_exception(EXCP_SVC);
3667 return EXIT_NORETURN;
3670 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3672 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3673 return NO_EXIT;
3676 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3678 int shift = s->insn->data & 0xff;
3679 int size = s->insn->data >> 8;
3680 uint64_t mask = ((1ull << size) - 1) << shift;
3682 assert(!o->g_in2);
3683 tcg_gen_shli_i64(o->in2, o->in2, shift);
3684 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3686 /* Produce the CC from only the bits manipulated. */
3687 tcg_gen_andi_i64(cc_dst, o->out, mask);
3688 set_cc_nz_u64(s, cc_dst);
3689 return NO_EXIT;
3692 /* ====================================================================== */
3693 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3694 the original inputs), update the various cc data structures in order to
3695 be able to compute the new condition code. */
3697 static void cout_abs32(DisasContext *s, DisasOps *o)
3699 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3702 static void cout_abs64(DisasContext *s, DisasOps *o)
3704 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3707 static void cout_adds32(DisasContext *s, DisasOps *o)
3709 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3712 static void cout_adds64(DisasContext *s, DisasOps *o)
3714 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3717 static void cout_addu32(DisasContext *s, DisasOps *o)
3719 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3722 static void cout_addu64(DisasContext *s, DisasOps *o)
3724 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3727 static void cout_addc32(DisasContext *s, DisasOps *o)
3729 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3732 static void cout_addc64(DisasContext *s, DisasOps *o)
3734 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3737 static void cout_cmps32(DisasContext *s, DisasOps *o)
3739 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3742 static void cout_cmps64(DisasContext *s, DisasOps *o)
3744 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3747 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3749 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3752 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3754 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3757 static void cout_nabs32(DisasContext *s, DisasOps *o)
3759 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3762 static void cout_nabs64(DisasContext *s, DisasOps *o)
3764 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3767 static void cout_neg32(DisasContext *s, DisasOps *o)
3769 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3772 static void cout_neg64(DisasContext *s, DisasOps *o)
3774 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3777 static void cout_nz32(DisasContext *s, DisasOps *o)
3779 tcg_gen_ext32u_i64(cc_dst, o->out);
3780 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3783 static void cout_nz64(DisasContext *s, DisasOps *o)
3785 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3788 static void cout_s32(DisasContext *s, DisasOps *o)
3790 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3793 static void cout_s64(DisasContext *s, DisasOps *o)
3795 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3798 static void cout_subs32(DisasContext *s, DisasOps *o)
3800 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3803 static void cout_subs64(DisasContext *s, DisasOps *o)
3805 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3808 static void cout_subu32(DisasContext *s, DisasOps *o)
3810 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3813 static void cout_subu64(DisasContext *s, DisasOps *o)
3815 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3818 static void cout_subb32(DisasContext *s, DisasOps *o)
3820 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3823 static void cout_subb64(DisasContext *s, DisasOps *o)
3825 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3828 static void cout_tm32(DisasContext *s, DisasOps *o)
3830 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3833 static void cout_tm64(DisasContext *s, DisasOps *o)
3835 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
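/* None of these cout generators computes a condition code directly; they
   only stash the relevant operands together with a CC_OP_* code so that
   gen_op_calc_cc() can materialize the CC later, and then only if some
   subsequent instruction actually consumes it. */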
3838 /* ====================================================================== */
3839 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3840 with the TCG register to which we will write. Used in combination with
3841 the "wout" generators, in some cases we need a new temporary, and in
3842 some cases we can write to a TCG global. */
3844 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3846 o->out = tcg_temp_new_i64();
3849 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3851 o->out = tcg_temp_new_i64();
3852 o->out2 = tcg_temp_new_i64();
3855 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3857 o->out = regs[get_field(f, r1)];
3858 o->g_out = true;
3861 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3863 /* ??? Specification exception: r1 must be even. */
3864 int r1 = get_field(f, r1);
3865 o->out = regs[r1];
3866 o->out2 = regs[(r1 + 1) & 15];
3867 o->g_out = o->g_out2 = true;
3870 /* ====================================================================== */
3871 /* The "Write OUTput" generators. These generally perform some non-trivial
3872 copy of data to TCG globals, or to main memory. The trivial cases are
3873 generally handled by having a "prep" generator install the TCG global
3874 as the destination of the operation. */
3876 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3878 store_reg(get_field(f, r1), o->out);
3881 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3883 int r1 = get_field(f, r1);
3884 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3887 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3889 store_reg32_i64(get_field(f, r1), o->out);
3892 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3894 /* ??? Specification exception: r1 must be even. */
3895 int r1 = get_field(f, r1);
3896 store_reg32_i64(r1, o->out);
3897 store_reg32_i64((r1 + 1) & 15, o->out2);
3900 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3902 /* ??? Specification exception: r1 must be even. */
3903 int r1 = get_field(f, r1);
3904 store_reg32_i64((r1 + 1) & 15, o->out);
3905 tcg_gen_shri_i64(o->out, o->out, 32);
3906 store_reg32_i64(r1, o->out);
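/* Example for the D32 (doubleword in a 32-bit register pair) layout:
   with o->out == 0x1122334455667788, register r1+1 receives 0x55667788
   and register r1 receives 0x11223344; this is the even/odd pair
   convention used for the 64-bit results of 32-bit multiply and
   divide. */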
3909 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3911 store_freg32_i64(get_field(f, r1), o->out);
3914 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3916 store_freg(get_field(f, r1), o->out);
3919 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3921 int f1 = get_field(s->fields, r1);
3922 store_freg(f1, o->out);
3923 store_freg((f1 + 2) & 15, o->out2);
3926 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3928 if (get_field(f, r1) != get_field(f, r2)) {
3929 store_reg32_i64(get_field(f, r1), o->out);
3933 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3935 if (get_field(f, r1) != get_field(f, r2)) {
3936 store_freg32_i64(get_field(f, r1), o->out);
3940 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3942 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3945 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3947 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3950 /* ====================================================================== */
3951 /* The "INput 1" generators. These load the first operand to an insn. */
3953 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3955 o->in1 = load_reg(get_field(f, r1));
3958 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3960 o->in1 = regs[get_field(f, r1)];
3961 o->g_in1 = true;
3964 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3966 /* ??? Specification exception: r1 must be even. */
3967 int r1 = get_field(f, r1);
3968 o->in1 = load_reg((r1 + 1) & 15);
3971 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3973 /* ??? Specification exception: r1 must be even. */
3974 int r1 = get_field(f, r1);
3975 o->in1 = tcg_temp_new_i64();
3976 tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3979 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3981 /* ??? Specification exception: r1 must be even. */
3982 int r1 = get_field(f, r1);
3983 o->in1 = tcg_temp_new_i64();
3984 tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
3987 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3989 /* ??? Specification exception: r1 must be even. */
3990 int r1 = get_field(f, r1);
3991 o->in1 = tcg_temp_new_i64();
3992 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3995 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3997 o->in1 = load_reg(get_field(f, r2));
4000 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4002 o->in1 = load_reg(get_field(f, r3));
4005 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4007 o->in1 = load_freg32_i64(get_field(f, r1));
4010 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4012 o->in1 = fregs[get_field(f, r1)];
4013 o->g_in1 = true;
4016 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4018 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4021 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4023 in1_la1(s, f, o);
4024 o->in1 = tcg_temp_new_i64();
4025 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4028 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4030 in1_la1(s, f, o);
4031 o->in1 = tcg_temp_new_i64();
4032 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4035 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4037 in1_la1(s, f, o);
4038 o->in1 = tcg_temp_new_i64();
4039 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4042 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4044 in1_la1(s, f, o);
4045 o->in1 = tcg_temp_new_i64();
4046 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4049 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4051 in1_la1(s, f, o);
4052 o->in1 = tcg_temp_new_i64();
4053 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4056 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4058 in1_la1(s, f, o);
4059 o->in1 = tcg_temp_new_i64();
4060 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4063 /* ====================================================================== */
4064 /* The "INput 2" generators. These load the second operand to an insn. */
4066 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4068 o->in2 = load_reg(get_field(f, r2));
4071 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4073 o->in2 = regs[get_field(f, r2)];
4074 o->g_in2 = true;
4077 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4079 int r2 = get_field(f, r2);
4080 if (r2 != 0) {
4081 o->in2 = load_reg(r2);
4085 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4087 o->in2 = tcg_temp_new_i64();
4088 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4091 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4093 o->in2 = tcg_temp_new_i64();
4094 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4097 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4099 o->in2 = tcg_temp_new_i64();
4100 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4103 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4105 o->in2 = tcg_temp_new_i64();
4106 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4109 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4111 o->in2 = load_reg(get_field(f, r3));
4114 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4116 o->in2 = tcg_temp_new_i64();
4117 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4120 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4122 o->in2 = tcg_temp_new_i64();
4123 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4126 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4128 o->in2 = load_freg32_i64(get_field(f, r2));
4131 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4133 o->in2 = fregs[get_field(f, r2)];
4134 o->g_in2 = true;
4137 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4139 int f2 = get_field(f, r2);
4140 o->in1 = fregs[f2];
4141 o->in2 = fregs[(f2 + 2) & 15];
4142 o->g_in1 = o->g_in2 = true;
4145 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4147 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4148 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4151 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4153 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
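/* in2_ri2 builds a PC-relative operand: the signed halfword count i2 is
   doubled and added to the current insn address.  For instance, with
   s->pc == 0x1000 and i2 == -4 the operand is 0x1000 + (-4 * 2) == 0xff8. */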
4156 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4158 in2_a2(s, f, o);
4159 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4162 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4164 in2_a2(s, f, o);
4165 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4168 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4170 in2_a2(s, f, o);
4171 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4174 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4176 in2_a2(s, f, o);
4177 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4180 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4182 in2_a2(s, f, o);
4183 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4186 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4188 in2_ri2(s, f, o);
4189 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4192 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4194 in2_ri2(s, f, o);
4195 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4198 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4200 in2_ri2(s, f, o);
4201 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4204 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4206 in2_ri2(s, f, o);
4207 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4210 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4212 o->in2 = tcg_const_i64(get_field(f, i2));
4215 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4217 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4220 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4222 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4225 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4227 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4230 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4232 uint64_t i2 = (uint16_t)get_field(f, i2);
4233 o->in2 = tcg_const_i64(i2 << s->insn->data);
4236 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4238 uint64_t i2 = (uint32_t)get_field(f, i2);
4239 o->in2 = tcg_const_i64(i2 << s->insn->data);
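/* For the *_shl variants the shift count is taken from the .data slot of
   the insn table entry (s->insn->data), so one helper covers every insn
   that places its immediate at a different halfword or word of a 64-bit
   value; e.g. i2 == 0x1234 shifted by data == 16 yields 0x12340000. */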
4242 /* ====================================================================== */
4244 /* Find opc within the table of insns. This is formulated as a switch
4245 statement so that (1) we get compile-time notice of cut-and-paste errors
4246 for duplicated opcodes, and (2) the compiler generates the binary
4247 search tree, rather than us having to post-process the table. */
4249 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4250 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4252 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4254 enum DisasInsnEnum {
4255 #include "insn-data.def"
4258 #undef D
4259 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4260 .opc = OPC, \
4261 .fmt = FMT_##FT, \
4262 .fac = FAC_##FC, \
4263 .name = #NM, \
4264 .help_in1 = in1_##I1, \
4265 .help_in2 = in2_##I2, \
4266 .help_prep = prep_##P, \
4267 .help_wout = wout_##W, \
4268 .help_cout = cout_##CC, \
4269 .help_op = op_##OP, \
4270 .data = D \
4273 /* Allow 0 to be used for NULL in the table below. */
4274 #define in1_0 NULL
4275 #define in2_0 NULL
4276 #define prep_0 NULL
4277 #define wout_0 NULL
4278 #define cout_0 NULL
4279 #define op_0 NULL
4281 static const DisasInsn insn_info[] = {
4282 #include "insn-data.def"
4285 #undef D
4286 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4287 case OPC: return &insn_info[insn_ ## NM];
4289 static const DisasInsn *lookup_opc(uint16_t opc)
4291 switch (opc) {
4292 #include "insn-data.def"
4293 default:
4294 return NULL;
4298 #undef D
4299 #undef C
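/* To make the triple expansion of insn-data.def concrete, consider a
   hypothetical table line (the opcode, format and facility names below
   are illustrative, not taken from the real table):

       C(0x1a00, FOO, RR_a, Z, 0, r2_32s, 0, 0, foo, 0)

   The first inclusion contributes the enumerator insn_FOO; the second
   expands to the initializer

       { .opc = 0x1a00, .fmt = FMT_RR_a, .fac = FAC_Z, .name = "FOO",
         .help_in1 = in1_0, .help_in2 = in2_r2_32s, .help_prep = prep_0,
         .help_wout = wout_0, .help_cout = cout_0, .help_op = op_foo,
         .data = 0 },

   where the *_0 names resolve to NULL via the #defines above; and the
   third inclusion becomes "case 0x1a00: return &insn_info[insn_FOO];"
   inside lookup_opc. */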
4301 /* Extract a field from the insn. The INSN should be left-aligned in
4302 the uint64_t so that we can more easily utilize the big-bit-endian
4303 definitions we extract from the Principles of Operation. */
4305 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4307 uint32_t r, m;
4309 if (f->size == 0) {
4310 return;
4313 /* Zero extract the field from the insn. */
4314 r = (insn << f->beg) >> (64 - f->size);
4316 /* Sign-extend, or un-swap the field as necessary. */
4317 switch (f->type) {
4318 case 0: /* unsigned */
4319 break;
4320 case 1: /* signed */
4321 assert(f->size <= 32);
4322 m = 1u << (f->size - 1);
4323 r = (r ^ m) - m;
4324 break;
4325 case 2: /* dl+dh split, signed 20 bit. */
4326 r = ((int8_t)r << 12) | (r >> 8);
4327 break;
4328 default:
4329 abort();
4332 /* Validate the "compressed" encoding we selected above, i.e. check
4333 that we haven't made two different original fields overlap. */
4334 assert(((o->presentC >> f->indexC) & 1) == 0);
4335 o->presentC |= 1 << f->indexC;
4336 o->presentO |= 1 << f->indexO;
4338 o->c[f->indexC] = r;
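/* Worked example of the sign-extension trick: for a 12-bit field with
   r == 0xfff, m == 0x800, so (r ^ m) - m == 0x7ff - 0x800 == -1, as a
   12-bit two's-complement value should be.  In the type-2 case the raw
   20-bit extract arrives as (dl << 8) | dh; the rotate-and-sign-extend
   rebuilds it as (sext(dh) << 12) | dl, i.e. dh contributes the signed
   high bits. */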
4341 /* Look up the insn at the current PC, extracting the operands into F and
4342 returning the info struct for the insn. Returns NULL for an invalid insn. */
4344 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4345 DisasFields *f)
4347 uint64_t insn, pc = s->pc;
4348 int op, op2, ilen;
4349 const DisasInsn *info;
4351 insn = ld_code2(env, pc);
4352 op = (insn >> 8) & 0xff;
4353 ilen = get_ilen(op);
4354 s->next_pc = s->pc + ilen;
4356 switch (ilen) {
4357 case 2:
4358 insn = insn << 48;
4359 break;
4360 case 4:
4361 insn = ld_code4(env, pc) << 32;
4362 break;
4363 case 6:
4364 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4365 break;
4366 default:
4367 abort();
4370 /* We can't actually determine the insn format until we've looked up
4371 the full insn opcode, which in turn requires locating the
4372 secondary opcode. Assume by default that OP2 is at bit 40; for
4373 those smaller insns that don't actually have a secondary opcode
4374 this will correctly result in OP2 = 0. */
4375 switch (op) {
4376 case 0x01: /* E */
4377 case 0x80: /* S */
4378 case 0x82: /* S */
4379 case 0x93: /* S */
4380 case 0xb2: /* S, RRF, RRE */
4381 case 0xb3: /* RRE, RRD, RRF */
4382 case 0xb9: /* RRE, RRF */
4383 case 0xe5: /* SSE, SIL */
4384 op2 = (insn << 8) >> 56;
4385 break;
4386 case 0xa5: /* RI */
4387 case 0xa7: /* RI */
4388 case 0xc0: /* RIL */
4389 case 0xc2: /* RIL */
4390 case 0xc4: /* RIL */
4391 case 0xc6: /* RIL */
4392 case 0xc8: /* SSF */
4393 case 0xcc: /* RIL */
4394 op2 = (insn << 12) >> 60;
4395 break;
4396 case 0xd0 ... 0xdf: /* SS */
4397 case 0xe1: /* SS */
4398 case 0xe2: /* SS */
4399 case 0xe8: /* SS */
4400 case 0xe9: /* SS */
4401 case 0xea: /* SS */
4402 case 0xee ... 0xf3: /* SS */
4403 case 0xf8 ... 0xfd: /* SS */
4404 op2 = 0;
4405 break;
4406 default:
4407 op2 = (insn << 40) >> 56;
4408 break;
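/* Example of the default extraction: with the insn left-aligned,
   (insn << 40) >> 56 isolates the byte at big-bit-endian position 40,
   i.e. the sixth byte of the encoding.  For a 6-byte insn that is the
   final byte, which is where families such as 0xe3 keep their secondary
   opcode; for shorter insns those bits were zero-filled by the
   left-alignment, giving OP2 = 0 as the comment above promises. */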
4411 memset(f, 0, sizeof(*f));
4412 f->op = op;
4413 f->op2 = op2;
4415 /* Look up the instruction. */
4416 info = lookup_opc(op << 8 | op2);
4418 /* If we found it, extract the operands. */
4419 if (info != NULL) {
4420 DisasFormat fmt = info->fmt;
4421 int i;
4423 for (i = 0; i < NUM_C_FIELD; ++i) {
4424 extract_field(f, &format_info[fmt].op[i], insn);
4427 return info;
4430 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4432 const DisasInsn *insn;
4433 ExitStatus ret = NO_EXIT;
4434 DisasFields f;
4435 DisasOps o;
4437 insn = extract_insn(env, s, &f);
4439 /* If not found, try the old interpreter. This includes ILLOPC. */
4440 if (insn == NULL) {
4441 disas_s390_insn(env, s);
4442 switch (s->is_jmp) {
4443 case DISAS_NEXT:
4444 ret = NO_EXIT;
4445 break;
4446 case DISAS_TB_JUMP:
4447 ret = EXIT_GOTO_TB;
4448 break;
4449 case DISAS_JUMP:
4450 ret = EXIT_PC_UPDATED;
4451 break;
4452 case DISAS_EXCP:
4453 ret = EXIT_NORETURN;
4454 break;
4455 default:
4456 abort();
4459 s->pc = s->next_pc;
4460 return ret;
4463 /* Set up the structures we use to communicate with the helpers. */
4464 s->insn = insn;
4465 s->fields = &f;
4466 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4467 TCGV_UNUSED_I64(o.out);
4468 TCGV_UNUSED_I64(o.out2);
4469 TCGV_UNUSED_I64(o.in1);
4470 TCGV_UNUSED_I64(o.in2);
4471 TCGV_UNUSED_I64(o.addr1);
4473 /* Implement the instruction. */
4474 if (insn->help_in1) {
4475 insn->help_in1(s, &f, &o);
4477 if (insn->help_in2) {
4478 insn->help_in2(s, &f, &o);
4480 if (insn->help_prep) {
4481 insn->help_prep(s, &f, &o);
4483 if (insn->help_op) {
4484 ret = insn->help_op(s, &o);
4486 if (insn->help_wout) {
4487 insn->help_wout(s, &f, &o);
4489 if (insn->help_cout) {
4490 insn->help_cout(s, &o);
4493 /* Free any temporaries created by the helpers. */
4494 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4495 tcg_temp_free_i64(o.out);
4497 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4498 tcg_temp_free_i64(o.out2);
4500 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4501 tcg_temp_free_i64(o.in1);
4503 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4504 tcg_temp_free_i64(o.in2);
4506 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4507 tcg_temp_free_i64(o.addr1);
4510 /* Advance to the next instruction. */
4511 s->pc = s->next_pc;
4512 return ret;
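/* The body above is the complete per-insn pipeline: gather inputs
   (help_in1, help_in2), prepare outputs (help_prep), emit the operation
   (help_op), write results back (help_wout), update the condition code
   (help_cout), and finally release every temporary that is not a global.
   Each hook is optional; a NULL slot in the DisasInsn entry simply skips
   that stage. */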
4515 static inline void gen_intermediate_code_internal(CPUS390XState *env,
4516 TranslationBlock *tb,
4517 int search_pc)
4519 DisasContext dc;
4520 target_ulong pc_start;
4521 uint64_t next_page_start;
4522 uint16_t *gen_opc_end;
4523 int j, lj = -1;
4524 int num_insns, max_insns;
4525 CPUBreakpoint *bp;
4526 ExitStatus status;
4527 bool do_debug;
4529 pc_start = tb->pc;
4531 /* 31-bit mode */
4532 if (!(tb->flags & FLAG_MASK_64)) {
4533 pc_start &= 0x7fffffff;
4536 dc.tb = tb;
4537 dc.pc = pc_start;
4538 dc.cc_op = CC_OP_DYNAMIC;
4539 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4540 dc.is_jmp = DISAS_NEXT;
4542 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4544 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
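/* Stopping at next_page_start keeps every TB within a single guest page,
   so that page-granular invalidation of translated code remains safe. */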
4546 num_insns = 0;
4547 max_insns = tb->cflags & CF_COUNT_MASK;
4548 if (max_insns == 0) {
4549 max_insns = CF_COUNT_MASK;
4552 gen_icount_start();
4554 do {
4555 if (search_pc) {
4556 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4557 if (lj < j) {
4558 lj++;
4559 while (lj < j) {
4560 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4563 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4564 gen_opc_cc_op[lj] = dc.cc_op;
4565 tcg_ctx.gen_opc_instr_start[lj] = 1;
4566 tcg_ctx.gen_opc_icount[lj] = num_insns;
4568 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4569 gen_io_start();
4572 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4573 tcg_gen_debug_insn_start(dc.pc);
4576 status = NO_EXIT;
4577 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4578 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4579 if (bp->pc == dc.pc) {
4580 status = EXIT_PC_STALE;
4581 do_debug = true;
4582 break;
4586 if (status == NO_EXIT) {
4587 status = translate_one(env, &dc);
4590 /* If we reach a page boundary, are single-stepping,
4591 or exhaust the instruction count, stop generation. */
4592 if (status == NO_EXIT
4593 && (dc.pc >= next_page_start
4594 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4595 || num_insns >= max_insns
4596 || singlestep
4597 || env->singlestep_enabled)) {
4598 status = EXIT_PC_STALE;
4600 } while (status == NO_EXIT);
4602 if (tb->cflags & CF_LAST_IO) {
4603 gen_io_end();
4606 switch (status) {
4607 case EXIT_GOTO_TB:
4608 case EXIT_NORETURN:
4609 break;
4610 case EXIT_PC_STALE:
4611 update_psw_addr(&dc);
4612 /* FALLTHRU */
4613 case EXIT_PC_UPDATED:
4614 if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
4615 gen_op_calc_cc(&dc);
4616 } else {
4617 /* Next TB starts off with CC_OP_DYNAMIC,
4618 so make sure the cc op type is in env */
4619 gen_op_set_cc_op(&dc);
4621 if (do_debug) {
4622 gen_exception(EXCP_DEBUG);
4623 } else {
4624 /* Generate the return instruction */
4625 tcg_gen_exit_tb(0);
4627 break;
4628 default:
4629 abort();
4632 gen_icount_end(tb, num_insns);
4633 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4634 if (search_pc) {
4635 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4636 lj++;
4637 while (lj <= j) {
4638 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4640 } else {
4641 tb->size = dc.pc - pc_start;
4642 tb->icount = num_insns;
4645 #if defined(S390X_DEBUG_DISAS)
4646 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4647 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4648 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4649 qemu_log("\n");
4651 #endif
4654 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4656 gen_intermediate_code_internal(env, tb, 0);
4659 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4661 gen_intermediate_code_internal(env, tb, 1);
4664 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4666 int cc_op;
4667 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4668 cc_op = gen_opc_cc_op[pc_pos];
4669 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4670 env->cc_op = cc_op;
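/* CC_OP_DYNAMIC and CC_OP_STATIC are skipped here, presumably because in
   both cases env->cc_op already carries the live state (the op, or the
   computed cc value, respectively), so overwriting it with the recorded
   enum would lose information; only materialized cc ops are restored. */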