target-s390: Convert TEST DATA CLASS
[qemu-kvm.git] / target-s390x / translate.c
blob6593d88c68a38c623915392b47466b99c777db84
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;    /* TB currently being translated */
    const DisasInsn *insn;          /* decode-table entry for current insn */
    DisasFields *fields;            /* decoded operand fields */
    uint64_t pc, next_pc;           /* guest address of current / next insn */
    enum cc_op cc_op;               /* lazily-tracked condition-code state */
    bool singlestep_enabled;        /* gdbstub single-stepping active */
    int is_jmp;                     /* how translation of this TB ends */
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;                     /* true: use u.s64, false: use u.s32 */
    bool g1;                        /* operand a is a TCG global; don't free */
    bool g2;                        /* operand b is a TCG global; don't free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
/* is_jmp value: translation stopped because an exception was generated.  */
#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of how often a conditional branch could (hit) or
   could not (miss) be compiled inline; dumped by cpu_dump_state().  */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

/* Trace a raw instruction word when verbose disas logging is enabled.  */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
94 return pc;
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
100 int i;
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
137 #endif
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
144 #endif
146 cpu_fprintf(f, "\n");
/* TCG globals mapping the architectural state used by the translator.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* Lazy condition-code state: the pending operation plus its operands.  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Backing storage for the names of the 16 GPRs and 16 FPRs below.  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* cc_op recorded per generated opcode, for restore at exception time.  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

/* One-time creation of all the TCG globals declared above.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* FPR names live in the second half of cpu_reg_names.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }
}
198 /* register helpers */
199 #define GEN_HELPER 2
200 #include "helper.h"
/* Return a fresh temporary holding GPR REG; caller frees.  */
static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temporary holding FPR REG (full 64 bits); caller frees.  */
static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

/* Return the high 32 bits of FPR REG (the short-float part) as i32.  */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* On 64-bit hosts an i32 temp aliases an i64 slot; shift in place.  */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}

/* As load_freg32, but widened into an i64 temporary.  */
static inline TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Return the low 32 bits of GPR REG as i32.  */
static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

/* Return the low 32 bits of GPR REG sign-extended into an i64.  */
static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}

/* Store V into GPR REG (all 64 bits).  */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into FPR REG (all 64 bits).  */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of GPR REG.  */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

/* Store the low 32 bits of V into the high (short-float) half of FPR REG.  */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low half of a 128-bit helper result from env->retxl.  */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Write the current translation pc back to the architectural PSW.  */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Make state consistent before an operation that may fault (softmmu only):
   a page fault must see the correct PSW address and condition code.  */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}

/* Fetch 2 bytes of instruction text at guest address PC.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 bytes of instruction text at guest address PC.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch 6 bytes of instruction text, right-aligned in the result.  */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
/* Map the PSW address-space-control bits (from tb->flags) to the MMU
   index used for memory accesses in this TB.  */
static inline int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}

/* Raise exception EXCP via the exception helper (does not return to
   generated code).  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
/* Raise a program exception with code CODE: record the code and the
   instruction length, synchronize PSW and cc, then trap out of the TB.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exeption this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length, needed to compute the faulting address.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    gen_op_calc_cc(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);

    /* End TB here.  */
    s->is_jmp = DISAS_EXCP;
}

/* Raise a specification program exception for an invalid opcode.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Raise a privileged-operation exception when running in problem state.  */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
/* Compute the effective address base(B2) + index(X2) + displacement D2.
   X2 or B2 may be 0, meaning "no register".  Returns a fresh temporary
   that the caller must free.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
/* Set the lazily-tracked cc to the constant value VAL (0..3).  */
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* Record a deferred cc computation OP over one 64-bit operand.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a deferred cc computation OP over one 32-bit operand.  */
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a deferred cc computation OP over two 64-bit operands.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a deferred cc computation OP over two 32-bit operands.  */
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a deferred cc computation OP over three 64-bit operands.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* cc from zero/non-zero of a 32-bit value.  */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

/* cc from zero/non-zero of a 64-bit value.  */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit comparison under comparison class COND.  */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

/* cc from a 64-bit comparison under comparison class COND.  */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

/* Signed 32-bit compare.  */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

/* Unsigned 32-bit compare.  */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

/* Signed 32-bit compare against an immediate.  */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

/* Unsigned 32-bit compare against an immediate.  */
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

/* Signed 64-bit compare.  */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

/* Unsigned 64-bit compare.  */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

/* Signed 64-bit compare against an immediate.  */
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* Unsigned 64-bit compare against an immediate.  */
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* cc from the sign of a 32-bit value (compare against zero).  */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

/* cc from the sign of a 64-bit value (compare against zero).  */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}

/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Flush the translation-time cc_op into the cc_op TCG global, so the
   runtime state matches what the translator has been tracking.  */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
/* Decode the register fields of an RR-format instruction.  */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

/* Decode an RX-format instruction and return its effective address
   (a fresh temporary; caller frees).  */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}

/* Decode the fields of an RS-format instruction.  */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

/* Decode an SI-format instruction and return its effective address
   (a fresh temporary; caller frees).  */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
/* Can a branch to DEST use direct TB chaining?  Only if DEST lies on one
   of the (up to two) guest pages this TB covers, and neither single-step
   nor I/O-at-end-of-TB forces a full exit.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}

/* Emit the end-of-TB branch to guest address PC, chaining via slot
   TB_NUM when permitted, else a plain exit.  */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
/* Count a branch that had to fall back to a full cc computation.  */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Count a branch that was compiled as an inline comparison.  */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Unconditional (mask 15) and never-taken (mask 0) need no operands;
       hand back the cc_op global merely as a safe dummy.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same mask table, but with unsigned comparison codes.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only "all zero" / "not all zero" inline.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc value is already computed in the cc_op global; derive a
           comparison on it from the 4-bit branch mask.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
972 static void free_compare(DisasCompare *c)
974 if (!c->g1) {
975 if (c->is_64) {
976 tcg_temp_free_i64(c->u.s64.a);
977 } else {
978 tcg_temp_free_i32(c->u.s32.a);
981 if (!c->g2) {
982 if (c->is_64) {
983 tcg_temp_free_i64(c->u.s64.b);
984 } else {
985 tcg_temp_free_i32(c->u.s32.b);
990 static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
991 uint32_t insn)
993 TCGv_i64 tmp, tmp2, tmp3;
994 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
995 int r1, r2;
996 #ifndef CONFIG_USER_ONLY
997 int r3, d2, b2;
998 #endif
1000 r1 = (insn >> 4) & 0xf;
1001 r2 = insn & 0xf;
1003 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1005 switch (op) {
1006 case 0x22: /* IPM R1 [RRE] */
1007 tmp32_1 = tcg_const_i32(r1);
1008 gen_op_calc_cc(s);
1009 gen_helper_ipm(cpu_env, cc_op, tmp32_1);
1010 tcg_temp_free_i32(tmp32_1);
1011 break;
1012 case 0x41: /* CKSM R1,R2 [RRE] */
1013 tmp32_1 = tcg_const_i32(r1);
1014 tmp32_2 = tcg_const_i32(r2);
1015 potential_page_fault(s);
1016 gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
1017 tcg_temp_free_i32(tmp32_1);
1018 tcg_temp_free_i32(tmp32_2);
1019 gen_op_movi_cc(s, 0);
1020 break;
1021 case 0x4e: /* SAR R1,R2 [RRE] */
1022 tmp32_1 = load_reg32(r2);
1023 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
1024 tcg_temp_free_i32(tmp32_1);
1025 break;
1026 case 0x4f: /* EAR R1,R2 [RRE] */
1027 tmp32_1 = tcg_temp_new_i32();
1028 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
1029 store_reg32(r1, tmp32_1);
1030 tcg_temp_free_i32(tmp32_1);
1031 break;
1032 case 0x54: /* MVPG R1,R2 [RRE] */
1033 tmp = load_reg(0);
1034 tmp2 = load_reg(r1);
1035 tmp3 = load_reg(r2);
1036 potential_page_fault(s);
1037 gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
1038 tcg_temp_free_i64(tmp);
1039 tcg_temp_free_i64(tmp2);
1040 tcg_temp_free_i64(tmp3);
1041 /* XXX check CCO bit and set CC accordingly */
1042 gen_op_movi_cc(s, 0);
1043 break;
1044 case 0x55: /* MVST R1,R2 [RRE] */
1045 tmp32_1 = load_reg32(0);
1046 tmp32_2 = tcg_const_i32(r1);
1047 tmp32_3 = tcg_const_i32(r2);
1048 potential_page_fault(s);
1049 gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
1050 tcg_temp_free_i32(tmp32_1);
1051 tcg_temp_free_i32(tmp32_2);
1052 tcg_temp_free_i32(tmp32_3);
1053 gen_op_movi_cc(s, 1);
1054 break;
1055 case 0x5d: /* CLST R1,R2 [RRE] */
1056 tmp32_1 = load_reg32(0);
1057 tmp32_2 = tcg_const_i32(r1);
1058 tmp32_3 = tcg_const_i32(r2);
1059 potential_page_fault(s);
1060 gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1061 set_cc_static(s);
1062 tcg_temp_free_i32(tmp32_1);
1063 tcg_temp_free_i32(tmp32_2);
1064 tcg_temp_free_i32(tmp32_3);
1065 break;
1066 case 0x5e: /* SRST R1,R2 [RRE] */
1067 tmp32_1 = load_reg32(0);
1068 tmp32_2 = tcg_const_i32(r1);
1069 tmp32_3 = tcg_const_i32(r2);
1070 potential_page_fault(s);
1071 gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1072 set_cc_static(s);
1073 tcg_temp_free_i32(tmp32_1);
1074 tcg_temp_free_i32(tmp32_2);
1075 tcg_temp_free_i32(tmp32_3);
1076 break;
1078 #ifndef CONFIG_USER_ONLY
1079 case 0x02: /* STIDP D2(B2) [S] */
1080 /* Store CPU ID */
1081 check_privileged(s);
1082 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1083 tmp = get_address(s, 0, b2, d2);
1084 potential_page_fault(s);
1085 gen_helper_stidp(cpu_env, tmp);
1086 tcg_temp_free_i64(tmp);
1087 break;
1088 case 0x04: /* SCK D2(B2) [S] */
1089 /* Set Clock */
1090 check_privileged(s);
1091 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1092 tmp = get_address(s, 0, b2, d2);
1093 potential_page_fault(s);
1094 gen_helper_sck(cc_op, tmp);
1095 set_cc_static(s);
1096 tcg_temp_free_i64(tmp);
1097 break;
1098 case 0x05: /* STCK D2(B2) [S] */
1099 /* Store Clock */
1100 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1101 tmp = get_address(s, 0, b2, d2);
1102 potential_page_fault(s);
1103 gen_helper_stck(cc_op, cpu_env, tmp);
1104 set_cc_static(s);
1105 tcg_temp_free_i64(tmp);
1106 break;
1107 case 0x06: /* SCKC D2(B2) [S] */
1108 /* Set Clock Comparator */
1109 check_privileged(s);
1110 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1111 tmp = get_address(s, 0, b2, d2);
1112 potential_page_fault(s);
1113 gen_helper_sckc(cpu_env, tmp);
1114 tcg_temp_free_i64(tmp);
1115 break;
1116 case 0x07: /* STCKC D2(B2) [S] */
1117 /* Store Clock Comparator */
1118 check_privileged(s);
1119 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1120 tmp = get_address(s, 0, b2, d2);
1121 potential_page_fault(s);
1122 gen_helper_stckc(cpu_env, tmp);
1123 tcg_temp_free_i64(tmp);
1124 break;
1125 case 0x08: /* SPT D2(B2) [S] */
1126 /* Set CPU Timer */
1127 check_privileged(s);
1128 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1129 tmp = get_address(s, 0, b2, d2);
1130 potential_page_fault(s);
1131 gen_helper_spt(cpu_env, tmp);
1132 tcg_temp_free_i64(tmp);
1133 break;
1134 case 0x09: /* STPT D2(B2) [S] */
1135 /* Store CPU Timer */
1136 check_privileged(s);
1137 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1138 tmp = get_address(s, 0, b2, d2);
1139 potential_page_fault(s);
1140 gen_helper_stpt(cpu_env, tmp);
1141 tcg_temp_free_i64(tmp);
1142 break;
1143 case 0x0a: /* SPKA D2(B2) [S] */
1144 /* Set PSW Key from Address */
1145 check_privileged(s);
1146 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1147 tmp = get_address(s, 0, b2, d2);
1148 tmp2 = tcg_temp_new_i64();
1149 tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
1150 tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
1151 tcg_gen_or_i64(psw_mask, tmp2, tmp);
1152 tcg_temp_free_i64(tmp2);
1153 tcg_temp_free_i64(tmp);
1154 break;
1155 case 0x0d: /* PTLB [S] */
1156 /* Purge TLB */
1157 check_privileged(s);
1158 gen_helper_ptlb(cpu_env);
1159 break;
1160 case 0x10: /* SPX D2(B2) [S] */
1161 /* Set Prefix Register */
1162 check_privileged(s);
1163 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1164 tmp = get_address(s, 0, b2, d2);
1165 potential_page_fault(s);
1166 gen_helper_spx(cpu_env, tmp);
1167 tcg_temp_free_i64(tmp);
1168 break;
1169 case 0x11: /* STPX D2(B2) [S] */
1170 /* Store Prefix */
1171 check_privileged(s);
1172 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1173 tmp = get_address(s, 0, b2, d2);
1174 tmp2 = tcg_temp_new_i64();
1175 tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
1176 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1177 tcg_temp_free_i64(tmp);
1178 tcg_temp_free_i64(tmp2);
1179 break;
1180 case 0x12: /* STAP D2(B2) [S] */
1181 /* Store CPU Address */
1182 check_privileged(s);
1183 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1184 tmp = get_address(s, 0, b2, d2);
1185 tmp2 = tcg_temp_new_i64();
1186 tmp32_1 = tcg_temp_new_i32();
1187 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
1188 tcg_gen_extu_i32_i64(tmp2, tmp32_1);
1189 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1190 tcg_temp_free_i64(tmp);
1191 tcg_temp_free_i64(tmp2);
1192 tcg_temp_free_i32(tmp32_1);
1193 break;
1194 case 0x21: /* IPTE R1,R2 [RRE] */
1195 /* Invalidate PTE */
1196 check_privileged(s);
1197 r1 = (insn >> 4) & 0xf;
1198 r2 = insn & 0xf;
1199 tmp = load_reg(r1);
1200 tmp2 = load_reg(r2);
1201 gen_helper_ipte(cpu_env, tmp, tmp2);
1202 tcg_temp_free_i64(tmp);
1203 tcg_temp_free_i64(tmp2);
1204 break;
1205 case 0x29: /* ISKE R1,R2 [RRE] */
1206 /* Insert Storage Key Extended */
1207 check_privileged(s);
1208 r1 = (insn >> 4) & 0xf;
1209 r2 = insn & 0xf;
1210 tmp = load_reg(r2);
1211 tmp2 = tcg_temp_new_i64();
1212 gen_helper_iske(tmp2, cpu_env, tmp);
1213 store_reg(r1, tmp2);
1214 tcg_temp_free_i64(tmp);
1215 tcg_temp_free_i64(tmp2);
1216 break;
1217 case 0x2a: /* RRBE R1,R2 [RRE] */
1218 /* Set Storage Key Extended */
1219 check_privileged(s);
1220 r1 = (insn >> 4) & 0xf;
1221 r2 = insn & 0xf;
1222 tmp32_1 = load_reg32(r1);
1223 tmp = load_reg(r2);
1224 gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
1225 set_cc_static(s);
1226 tcg_temp_free_i32(tmp32_1);
1227 tcg_temp_free_i64(tmp);
1228 break;
1229 case 0x2b: /* SSKE R1,R2 [RRE] */
1230 /* Set Storage Key Extended */
1231 check_privileged(s);
1232 r1 = (insn >> 4) & 0xf;
1233 r2 = insn & 0xf;
1234 tmp32_1 = load_reg32(r1);
1235 tmp = load_reg(r2);
1236 gen_helper_sske(cpu_env, tmp32_1, tmp);
1237 tcg_temp_free_i32(tmp32_1);
1238 tcg_temp_free_i64(tmp);
1239 break;
1240 case 0x34: /* STCH ? */
1241 /* Store Subchannel */
1242 check_privileged(s);
1243 gen_op_movi_cc(s, 3);
1244 break;
1245 case 0x46: /* STURA R1,R2 [RRE] */
1246 /* Store Using Real Address */
1247 check_privileged(s);
1248 r1 = (insn >> 4) & 0xf;
1249 r2 = insn & 0xf;
1250 tmp32_1 = load_reg32(r1);
1251 tmp = load_reg(r2);
1252 potential_page_fault(s);
1253 gen_helper_stura(cpu_env, tmp, tmp32_1);
1254 tcg_temp_free_i32(tmp32_1);
1255 tcg_temp_free_i64(tmp);
1256 break;
1257 case 0x50: /* CSP R1,R2 [RRE] */
1258 /* Compare And Swap And Purge */
1259 check_privileged(s);
1260 r1 = (insn >> 4) & 0xf;
1261 r2 = insn & 0xf;
1262 tmp32_1 = tcg_const_i32(r1);
1263 tmp32_2 = tcg_const_i32(r2);
1264 gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
1265 set_cc_static(s);
1266 tcg_temp_free_i32(tmp32_1);
1267 tcg_temp_free_i32(tmp32_2);
1268 break;
1269 case 0x5f: /* CHSC ? */
1270 /* Channel Subsystem Call */
1271 check_privileged(s);
1272 gen_op_movi_cc(s, 3);
1273 break;
1274 case 0x78: /* STCKE D2(B2) [S] */
1275 /* Store Clock Extended */
1276 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1277 tmp = get_address(s, 0, b2, d2);
1278 potential_page_fault(s);
1279 gen_helper_stcke(cc_op, cpu_env, tmp);
1280 set_cc_static(s);
1281 tcg_temp_free_i64(tmp);
1282 break;
1283 case 0x79: /* SACF D2(B2) [S] */
1284 /* Set Address Space Control Fast */
1285 check_privileged(s);
1286 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1287 tmp = get_address(s, 0, b2, d2);
1288 potential_page_fault(s);
1289 gen_helper_sacf(cpu_env, tmp);
1290 tcg_temp_free_i64(tmp);
1291 /* addressing mode has changed, so end the block */
1292 s->pc = s->next_pc;
1293 update_psw_addr(s);
1294 s->is_jmp = DISAS_JUMP;
1295 break;
1296 case 0x7d: /* STSI D2,(B2) [S] */
1297 check_privileged(s);
1298 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1299 tmp = get_address(s, 0, b2, d2);
1300 tmp32_1 = load_reg32(0);
1301 tmp32_2 = load_reg32(1);
1302 potential_page_fault(s);
1303 gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
1304 set_cc_static(s);
1305 tcg_temp_free_i64(tmp);
1306 tcg_temp_free_i32(tmp32_1);
1307 tcg_temp_free_i32(tmp32_2);
1308 break;
1309 case 0x9d: /* LFPC D2(B2) [S] */
1310 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1311 tmp = get_address(s, 0, b2, d2);
1312 tmp2 = tcg_temp_new_i64();
1313 tmp32_1 = tcg_temp_new_i32();
1314 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
1315 tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
1316 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1317 tcg_temp_free_i64(tmp);
1318 tcg_temp_free_i64(tmp2);
1319 tcg_temp_free_i32(tmp32_1);
1320 break;
1321 case 0xb1: /* STFL D2(B2) [S] */
1322 /* Store Facility List (CPU features) at 200 */
1323 check_privileged(s);
1324 tmp2 = tcg_const_i64(0xc0000000);
1325 tmp = tcg_const_i64(200);
1326 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1327 tcg_temp_free_i64(tmp2);
1328 tcg_temp_free_i64(tmp);
1329 break;
1330 case 0xb2: /* LPSWE D2(B2) [S] */
1331 /* Load PSW Extended */
1332 check_privileged(s);
1333 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1334 tmp = get_address(s, 0, b2, d2);
1335 tmp2 = tcg_temp_new_i64();
1336 tmp3 = tcg_temp_new_i64();
1337 tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
1338 tcg_gen_addi_i64(tmp, tmp, 8);
1339 tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
1340 gen_helper_load_psw(cpu_env, tmp2, tmp3);
1341 /* we need to keep cc_op intact */
1342 s->is_jmp = DISAS_JUMP;
1343 tcg_temp_free_i64(tmp);
1344 tcg_temp_free_i64(tmp2);
1345 tcg_temp_free_i64(tmp3);
1346 break;
1347 case 0x20: /* SERVC R1,R2 [RRE] */
1348 /* SCLP Service call (PV hypercall) */
1349 check_privileged(s);
1350 potential_page_fault(s);
1351 tmp32_1 = load_reg32(r2);
1352 tmp = load_reg(r1);
1353 gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
1354 set_cc_static(s);
1355 tcg_temp_free_i32(tmp32_1);
1356 tcg_temp_free_i64(tmp);
1357 break;
1358 #endif
1359 default:
1360 LOG_DISAS("illegal b2 operation 0x%x\n", op);
1361 gen_illegal_opcode(s);
1362 break;
1366 static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
1367 int r1, int r2)
1369 TCGv_i64 tmp;
1370 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
1371 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
1372 #define FP_HELPER(i) \
1373 tmp32_1 = tcg_const_i32(r1); \
1374 tmp32_2 = tcg_const_i32(r2); \
1375 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1376 tcg_temp_free_i32(tmp32_1); \
1377 tcg_temp_free_i32(tmp32_2);
1379 #define FP_HELPER_CC(i) \
1380 tmp32_1 = tcg_const_i32(r1); \
1381 tmp32_2 = tcg_const_i32(r2); \
1382 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1383 set_cc_static(s); \
1384 tcg_temp_free_i32(tmp32_1); \
1385 tcg_temp_free_i32(tmp32_2);
1387 switch (op) {
1388 case 0x0: /* LPEBR R1,R2 [RRE] */
1389 FP_HELPER_CC(lpebr);
1390 break;
1391 case 0x3: /* LCEBR R1,R2 [RRE] */
1392 FP_HELPER_CC(lcebr);
1393 break;
1394 case 0x10: /* LPDBR R1,R2 [RRE] */
1395 FP_HELPER_CC(lpdbr);
1396 break;
1397 case 0x13: /* LCDBR R1,R2 [RRE] */
1398 FP_HELPER_CC(lcdbr);
1399 break;
1400 case 0x15: /* SQBDR R1,R2 [RRE] */
1401 FP_HELPER(sqdbr);
1402 break;
1403 case 0x40: /* LPXBR R1,R2 [RRE] */
1404 FP_HELPER_CC(lpxbr);
1405 break;
1406 case 0x43: /* LCXBR R1,R2 [RRE] */
1407 FP_HELPER_CC(lcxbr);
1408 break;
1409 case 0x65: /* LXR R1,R2 [RRE] */
1410 tmp = load_freg(r2);
1411 store_freg(r1, tmp);
1412 tcg_temp_free_i64(tmp);
1413 tmp = load_freg(r2 + 2);
1414 store_freg(r1 + 2, tmp);
1415 tcg_temp_free_i64(tmp);
1416 break;
1417 case 0x74: /* LZER R1 [RRE] */
1418 tmp32_1 = tcg_const_i32(r1);
1419 gen_helper_lzer(cpu_env, tmp32_1);
1420 tcg_temp_free_i32(tmp32_1);
1421 break;
1422 case 0x75: /* LZDR R1 [RRE] */
1423 tmp32_1 = tcg_const_i32(r1);
1424 gen_helper_lzdr(cpu_env, tmp32_1);
1425 tcg_temp_free_i32(tmp32_1);
1426 break;
1427 case 0x76: /* LZXR R1 [RRE] */
1428 tmp32_1 = tcg_const_i32(r1);
1429 gen_helper_lzxr(cpu_env, tmp32_1);
1430 tcg_temp_free_i32(tmp32_1);
1431 break;
1432 case 0x84: /* SFPC R1 [RRE] */
1433 tmp32_1 = load_reg32(r1);
1434 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1435 tcg_temp_free_i32(tmp32_1);
1436 break;
1437 case 0x94: /* CEFBR R1,R2 [RRE] */
1438 case 0x95: /* CDFBR R1,R2 [RRE] */
1439 case 0x96: /* CXFBR R1,R2 [RRE] */
1440 tmp32_1 = tcg_const_i32(r1);
1441 tmp32_2 = load_reg32(r2);
1442 switch (op) {
1443 case 0x94:
1444 gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
1445 break;
1446 case 0x95:
1447 gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
1448 break;
1449 case 0x96:
1450 gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
1451 break;
1452 default:
1453 tcg_abort();
1455 tcg_temp_free_i32(tmp32_1);
1456 tcg_temp_free_i32(tmp32_2);
1457 break;
1458 case 0x98: /* CFEBR R1,R2 [RRE] */
1459 case 0x99: /* CFDBR R1,R2 [RRE] */
1460 case 0x9a: /* CFXBR R1,R2 [RRE] */
1461 tmp32_1 = tcg_const_i32(r1);
1462 tmp32_2 = tcg_const_i32(r2);
1463 tmp32_3 = tcg_const_i32(m3);
1464 switch (op) {
1465 case 0x98:
1466 gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1467 break;
1468 case 0x99:
1469 gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1470 break;
1471 case 0x9a:
1472 gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1473 break;
1474 default:
1475 tcg_abort();
1477 set_cc_static(s);
1478 tcg_temp_free_i32(tmp32_1);
1479 tcg_temp_free_i32(tmp32_2);
1480 tcg_temp_free_i32(tmp32_3);
1481 break;
1482 case 0xa4: /* CEGBR R1,R2 [RRE] */
1483 case 0xa5: /* CDGBR R1,R2 [RRE] */
1484 tmp32_1 = tcg_const_i32(r1);
1485 tmp = load_reg(r2);
1486 switch (op) {
1487 case 0xa4:
1488 gen_helper_cegbr(cpu_env, tmp32_1, tmp);
1489 break;
1490 case 0xa5:
1491 gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
1492 break;
1493 default:
1494 tcg_abort();
1496 tcg_temp_free_i32(tmp32_1);
1497 tcg_temp_free_i64(tmp);
1498 break;
1499 case 0xa6: /* CXGBR R1,R2 [RRE] */
1500 tmp32_1 = tcg_const_i32(r1);
1501 tmp = load_reg(r2);
1502 gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
1503 tcg_temp_free_i32(tmp32_1);
1504 tcg_temp_free_i64(tmp);
1505 break;
1506 case 0xa8: /* CGEBR R1,R2 [RRE] */
1507 tmp32_1 = tcg_const_i32(r1);
1508 tmp32_2 = tcg_const_i32(r2);
1509 tmp32_3 = tcg_const_i32(m3);
1510 gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1511 set_cc_static(s);
1512 tcg_temp_free_i32(tmp32_1);
1513 tcg_temp_free_i32(tmp32_2);
1514 tcg_temp_free_i32(tmp32_3);
1515 break;
1516 case 0xa9: /* CGDBR R1,R2 [RRE] */
1517 tmp32_1 = tcg_const_i32(r1);
1518 tmp32_2 = tcg_const_i32(r2);
1519 tmp32_3 = tcg_const_i32(m3);
1520 gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1521 set_cc_static(s);
1522 tcg_temp_free_i32(tmp32_1);
1523 tcg_temp_free_i32(tmp32_2);
1524 tcg_temp_free_i32(tmp32_3);
1525 break;
1526 case 0xaa: /* CGXBR R1,R2 [RRE] */
1527 tmp32_1 = tcg_const_i32(r1);
1528 tmp32_2 = tcg_const_i32(r2);
1529 tmp32_3 = tcg_const_i32(m3);
1530 gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1531 set_cc_static(s);
1532 tcg_temp_free_i32(tmp32_1);
1533 tcg_temp_free_i32(tmp32_2);
1534 tcg_temp_free_i32(tmp32_3);
1535 break;
1536 default:
1537 LOG_DISAS("illegal b3 operation 0x%x\n", op);
1538 gen_illegal_opcode(s);
1539 break;
1542 #undef FP_HELPER_CC
1543 #undef FP_HELPER
1546 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
1547 int r2)
1549 TCGv_i64 tmp;
1550 TCGv_i32 tmp32_1;
1552 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1553 switch (op) {
1554 case 0x83: /* FLOGR R1,R2 [RRE] */
1555 tmp = load_reg(r2);
1556 tmp32_1 = tcg_const_i32(r1);
1557 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
1558 set_cc_static(s);
1559 tcg_temp_free_i64(tmp);
1560 tcg_temp_free_i32(tmp32_1);
1561 break;
1562 default:
1563 LOG_DISAS("illegal b9 operation 0x%x\n", op);
1564 gen_illegal_opcode(s);
1565 break;
1569 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1571 unsigned char opc;
1572 uint64_t insn;
1573 int op, r1, r2, r3;
1575 opc = cpu_ldub_code(env, s->pc);
1576 LOG_DISAS("opc 0x%x\n", opc);
1578 switch (opc) {
1579 case 0xb2:
1580 insn = ld_code4(env, s->pc);
1581 op = (insn >> 16) & 0xff;
1582 disas_b2(env, s, op, insn);
1583 break;
1584 case 0xb3:
1585 insn = ld_code4(env, s->pc);
1586 op = (insn >> 16) & 0xff;
1587 r3 = (insn >> 12) & 0xf; /* aka m3 */
1588 r1 = (insn >> 4) & 0xf;
1589 r2 = insn & 0xf;
1590 disas_b3(env, s, op, r3, r1, r2);
1591 break;
1592 case 0xb9:
1593 insn = ld_code4(env, s->pc);
1594 r1 = (insn >> 4) & 0xf;
1595 r2 = insn & 0xf;
1596 op = (insn >> 16) & 0xff;
1597 disas_b9(env, s, op, r1, r2);
1598 break;
1599 default:
1600 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1601 gen_illegal_opcode(s);
1602 break;
1606 /* ====================================================================== */
1607 /* Define the insn format enumeration. */
1608 #define F0(N) FMT_##N,
1609 #define F1(N, X1) F0(N)
1610 #define F2(N, X1, X2) F0(N)
1611 #define F3(N, X1, X2, X3) F0(N)
1612 #define F4(N, X1, X2, X3, X4) F0(N)
1613 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1615 typedef enum {
1616 #include "insn-format.def"
1617 } DisasFormat;
1619 #undef F0
1620 #undef F1
1621 #undef F2
1622 #undef F3
1623 #undef F4
1624 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* "Original" field indices: one bit position per architectural field.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* "Compact" field indices: fields that never coexist in one format
   share a slot, so only NUM_C_FIELD array entries are needed.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1686 struct DisasFields {
1687 unsigned op:8;
1688 unsigned op2:8;
1689 unsigned presentC:16;
1690 unsigned int presentO;
1691 int c[NUM_C_FIELD];
1694 /* This is the way fields are to be accessed out of DisasFields. */
1695 #define have_field(S, F) have_field1((S), FLD_O_##F)
1696 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1698 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1700 return (f->presentO >> c) & 1;
1703 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1704 enum DisasFieldIndexC c)
1706 assert(have_field1(f, o));
1707 return f->c[c];
1710 /* Describe the layout of each field in each format. */
1711 typedef struct DisasField {
1712 unsigned int beg:8;
1713 unsigned int size:8;
1714 unsigned int type:2;
1715 unsigned int indexC:6;
1716 enum DisasFieldIndexO indexO:8;
1717 } DisasField;
1719 typedef struct DisasFormatInfo {
1720 DisasField op[NUM_C_FIELD];
1721 } DisasFormatInfo;
1723 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1724 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1725 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1726 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1727 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1728 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1729 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1730 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1731 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1732 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1733 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1734 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1735 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1736 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1738 #define F0(N) { { } },
1739 #define F1(N, X1) { { X1 } },
1740 #define F2(N, X1, X2) { { X1, X2 } },
1741 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1742 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1743 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1745 static const DisasFormatInfo format_info[] = {
1746 #include "insn-format.def"
1749 #undef F0
1750 #undef F1
1751 #undef F2
1752 #undef F3
1753 #undef F4
1754 #undef F5
1755 #undef R
1756 #undef M
1757 #undef BD
1758 #undef BXD
1759 #undef BDL
1760 #undef BXDL
1761 #undef I
1762 #undef L
1764 /* Generally, we'll extract operands into this structures, operate upon
1765 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1766 of routines below for more details. */
1767 typedef struct {
1768 bool g_out, g_out2, g_in1, g_in2;
1769 TCGv_i64 out, out2, in1, in2;
1770 TCGv_i64 addr1;
1771 } DisasOps;
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* Architectural facilities an instruction may depend on.  */
typedef enum DisasFacility {
    FAC_Z,          /* zarch (default) */
    FAC_CASS,       /* compare and swap and store */
    FAC_CASS2,      /* compare and swap and store 2 */
    FAC_DFP,        /* decimal floating point */
    FAC_DFPR,       /* decimal floating point rounding */
    FAC_DO,         /* distinct operands */
    FAC_EE,         /* execute extensions */
    FAC_EI,         /* extended immediate */
    FAC_FPE,        /* floating point extension */
    FAC_FPSSH,      /* floating point support sign handling */
    FAC_FPRGR,      /* FPR-GR transfer */
    FAC_GIE,        /* general instructions extension */
    FAC_HFP_MA,     /* HFP multiply-and-add/subtract */
    FAC_HW,         /* high-word */
    FAC_IEEEE_SIM,  /* IEEE exception simulation */
    FAC_LOC,        /* load/store on condition */
    FAC_LD,         /* long displacement */
    FAC_PC,         /* population count */
    FAC_SCF,        /* store clock fast */
    FAC_SFLE,       /* store facility list extended */
} DisasFacility;
1814 struct DisasInsn {
1815 unsigned opc:16;
1816 DisasFormat fmt:6;
1817 DisasFacility fac:6;
1819 const char *name;
1821 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1822 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1823 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1824 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1825 void (*help_cout)(DisasContext *, DisasOps *);
1826 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1828 uint64_t data;
1831 /* ====================================================================== */
1832 /* Miscelaneous helpers, used by several operations. */
1834 static void help_l2_shift(DisasContext *s, DisasFields *f,
1835 DisasOps *o, int mask)
1837 int b2 = get_field(f, b2);
1838 int d2 = get_field(f, d2);
1840 if (b2 == 0) {
1841 o->in2 = tcg_const_i64(d2 & mask);
1842 } else {
1843 o->in2 = get_address(s, 0, b2, d2);
1844 tcg_gen_andi_i64(o->in2, o->in2, mask);
1848 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1850 if (dest == s->next_pc) {
1851 return NO_EXIT;
1853 if (use_goto_tb(s, dest)) {
1854 gen_update_cc_op(s);
1855 tcg_gen_goto_tb(0);
1856 tcg_gen_movi_i64(psw_addr, dest);
1857 tcg_gen_exit_tb((tcg_target_long)s->tb);
1858 return EXIT_GOTO_TB;
1859 } else {
1860 tcg_gen_movi_i64(psw_addr, dest);
1861 return EXIT_PC_UPDATED;
1865 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1866 bool is_imm, int imm, TCGv_i64 cdest)
1868 ExitStatus ret;
1869 uint64_t dest = s->pc + 2 * imm;
1870 int lab;
1872 /* Take care of the special cases first. */
1873 if (c->cond == TCG_COND_NEVER) {
1874 ret = NO_EXIT;
1875 goto egress;
1877 if (is_imm) {
1878 if (dest == s->next_pc) {
1879 /* Branch to next. */
1880 ret = NO_EXIT;
1881 goto egress;
1883 if (c->cond == TCG_COND_ALWAYS) {
1884 ret = help_goto_direct(s, dest);
1885 goto egress;
1887 } else {
1888 if (TCGV_IS_UNUSED_I64(cdest)) {
1889 /* E.g. bcr %r0 -> no branch. */
1890 ret = NO_EXIT;
1891 goto egress;
1893 if (c->cond == TCG_COND_ALWAYS) {
1894 tcg_gen_mov_i64(psw_addr, cdest);
1895 ret = EXIT_PC_UPDATED;
1896 goto egress;
1900 if (use_goto_tb(s, s->next_pc)) {
1901 if (is_imm && use_goto_tb(s, dest)) {
1902 /* Both exits can use goto_tb. */
1903 gen_update_cc_op(s);
1905 lab = gen_new_label();
1906 if (c->is_64) {
1907 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1908 } else {
1909 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1912 /* Branch not taken. */
1913 tcg_gen_goto_tb(0);
1914 tcg_gen_movi_i64(psw_addr, s->next_pc);
1915 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1917 /* Branch taken. */
1918 gen_set_label(lab);
1919 tcg_gen_goto_tb(1);
1920 tcg_gen_movi_i64(psw_addr, dest);
1921 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
1923 ret = EXIT_GOTO_TB;
1924 } else {
1925 /* Fallthru can use goto_tb, but taken branch cannot. */
1926 /* Store taken branch destination before the brcond. This
1927 avoids having to allocate a new local temp to hold it.
1928 We'll overwrite this in the not taken case anyway. */
1929 if (!is_imm) {
1930 tcg_gen_mov_i64(psw_addr, cdest);
1933 lab = gen_new_label();
1934 if (c->is_64) {
1935 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1936 } else {
1937 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1940 /* Branch not taken. */
1941 gen_update_cc_op(s);
1942 tcg_gen_goto_tb(0);
1943 tcg_gen_movi_i64(psw_addr, s->next_pc);
1944 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1946 gen_set_label(lab);
1947 if (is_imm) {
1948 tcg_gen_movi_i64(psw_addr, dest);
1950 ret = EXIT_PC_UPDATED;
1952 } else {
1953 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1954 Most commonly we're single-stepping or some other condition that
1955 disables all use of goto_tb. Just update the PC and exit. */
1957 TCGv_i64 next = tcg_const_i64(s->next_pc);
1958 if (is_imm) {
1959 cdest = tcg_const_i64(dest);
1962 if (c->is_64) {
1963 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1964 cdest, next);
1965 } else {
1966 TCGv_i32 t0 = tcg_temp_new_i32();
1967 TCGv_i64 t1 = tcg_temp_new_i64();
1968 TCGv_i64 z = tcg_const_i64(0);
1969 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1970 tcg_gen_extu_i32_i64(t1, t0);
1971 tcg_temp_free_i32(t0);
1972 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1973 tcg_temp_free_i64(t1);
1974 tcg_temp_free_i64(z);
1977 if (is_imm) {
1978 tcg_temp_free_i64(cdest);
1980 tcg_temp_free_i64(next);
1982 ret = EXIT_PC_UPDATED;
1985 egress:
1986 free_compare(c);
1987 return ret;
1990 /* ====================================================================== */
1991 /* The operations. These perform the bulk of the work for any insn,
1992 usually after the operands have been loaded and output initialized. */
1994 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1996 gen_helper_abs_i64(o->out, o->in2);
1997 return NO_EXIT;
2000 static ExitStatus op_add(DisasContext *s, DisasOps *o)
2002 tcg_gen_add_i64(o->out, o->in1, o->in2);
2003 return NO_EXIT;
2006 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
2008 TCGv_i64 cc;
2010 tcg_gen_add_i64(o->out, o->in1, o->in2);
2012 /* XXX possible optimization point */
2013 gen_op_calc_cc(s);
2014 cc = tcg_temp_new_i64();
2015 tcg_gen_extu_i32_i64(cc, cc_op);
2016 tcg_gen_shri_i64(cc, cc, 1);
2018 tcg_gen_add_i64(o->out, o->out, cc);
2019 tcg_temp_free_i64(cc);
2020 return NO_EXIT;
2023 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
2025 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
2026 return NO_EXIT;
2029 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
2031 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
2032 return NO_EXIT;
2035 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
2037 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2038 return_low128(o->out2);
2039 return NO_EXIT;
2042 static ExitStatus op_and(DisasContext *s, DisasOps *o)
2044 tcg_gen_and_i64(o->out, o->in1, o->in2);
2045 return NO_EXIT;
2048 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
2050 int shift = s->insn->data & 0xff;
2051 int size = s->insn->data >> 8;
2052 uint64_t mask = ((1ull << size) - 1) << shift;
2054 assert(!o->g_in2);
2055 tcg_gen_shli_i64(o->in2, o->in2, shift);
2056 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2057 tcg_gen_and_i64(o->out, o->in1, o->in2);
2059 /* Produce the CC from only the bits manipulated. */
2060 tcg_gen_andi_i64(cc_dst, o->out, mask);
2061 set_cc_nz_u64(s, cc_dst);
2062 return NO_EXIT;
2065 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
2067 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2068 if (!TCGV_IS_UNUSED_I64(o->in2)) {
2069 tcg_gen_mov_i64(psw_addr, o->in2);
2070 return EXIT_PC_UPDATED;
2071 } else {
2072 return NO_EXIT;
2076 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
2078 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2079 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
2082 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
2084 int m1 = get_field(s->fields, m1);
2085 bool is_imm = have_field(s->fields, i2);
2086 int imm = is_imm ? get_field(s->fields, i2) : 0;
2087 DisasCompare c;
2089 disas_jcc(s, &c, m1);
2090 return help_branch(s, &c, is_imm, imm, o->in2);
2093 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
2095 int r1 = get_field(s->fields, r1);
2096 bool is_imm = have_field(s->fields, i2);
2097 int imm = is_imm ? get_field(s->fields, i2) : 0;
2098 DisasCompare c;
2099 TCGv_i64 t;
2101 c.cond = TCG_COND_NE;
2102 c.is_64 = false;
2103 c.g1 = false;
2104 c.g2 = false;
2106 t = tcg_temp_new_i64();
2107 tcg_gen_subi_i64(t, regs[r1], 1);
2108 store_reg32_i64(r1, t);
2109 c.u.s32.a = tcg_temp_new_i32();
2110 c.u.s32.b = tcg_const_i32(0);
2111 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
2112 tcg_temp_free_i64(t);
2114 return help_branch(s, &c, is_imm, imm, o->in2);
2117 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
2119 int r1 = get_field(s->fields, r1);
2120 bool is_imm = have_field(s->fields, i2);
2121 int imm = is_imm ? get_field(s->fields, i2) : 0;
2122 DisasCompare c;
2124 c.cond = TCG_COND_NE;
2125 c.is_64 = true;
2126 c.g1 = true;
2127 c.g2 = false;
2129 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
2130 c.u.s64.a = regs[r1];
2131 c.u.s64.b = tcg_const_i64(0);
2133 return help_branch(s, &c, is_imm, imm, o->in2);
2136 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
2138 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
2139 set_cc_static(s);
2140 return NO_EXIT;
2143 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
2145 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
2146 set_cc_static(s);
2147 return NO_EXIT;
2150 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
2152 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2153 set_cc_static(s);
2154 return NO_EXIT;
2157 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
2159 int l = get_field(s->fields, l1);
2160 TCGv_i32 vl;
2162 switch (l + 1) {
2163 case 1:
2164 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2165 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2166 break;
2167 case 2:
2168 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2169 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2170 break;
2171 case 4:
2172 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2173 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2174 break;
2175 case 8:
2176 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2177 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2178 break;
2179 default:
2180 potential_page_fault(s);
2181 vl = tcg_const_i32(l);
2182 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2183 tcg_temp_free_i32(vl);
2184 set_cc_static(s);
2185 return NO_EXIT;
2187 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2188 return NO_EXIT;
2191 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
2193 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2194 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2195 potential_page_fault(s);
2196 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
2197 tcg_temp_free_i32(r1);
2198 tcg_temp_free_i32(r3);
2199 set_cc_static(s);
2200 return NO_EXIT;
2203 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
2205 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2206 TCGv_i32 t1 = tcg_temp_new_i32();
2207 tcg_gen_trunc_i64_i32(t1, o->in1);
2208 potential_page_fault(s);
2209 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2210 set_cc_static(s);
2211 tcg_temp_free_i32(t1);
2212 tcg_temp_free_i32(m3);
2213 return NO_EXIT;
2216 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
2218 int r3 = get_field(s->fields, r3);
2219 potential_page_fault(s);
2220 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
2221 set_cc_static(s);
2222 return NO_EXIT;
2225 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
2227 int r3 = get_field(s->fields, r3);
2228 potential_page_fault(s);
2229 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
2230 set_cc_static(s);
2231 return NO_EXIT;
2234 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
2236 int r3 = get_field(s->fields, r3);
2237 TCGv_i64 in3 = tcg_temp_new_i64();
2238 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
2239 potential_page_fault(s);
2240 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
2241 tcg_temp_free_i64(in3);
2242 set_cc_static(s);
2243 return NO_EXIT;
2246 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
2248 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2249 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2250 potential_page_fault(s);
2251 /* XXX rewrite in tcg */
2252 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
2253 set_cc_static(s);
2254 return NO_EXIT;
2257 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2259 TCGv_i64 t1 = tcg_temp_new_i64();
2260 TCGv_i32 t2 = tcg_temp_new_i32();
2261 tcg_gen_trunc_i64_i32(t2, o->in1);
2262 gen_helper_cvd(t1, t2);
2263 tcg_temp_free_i32(t2);
2264 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2265 tcg_temp_free_i64(t1);
2266 return NO_EXIT;
2269 #ifndef CONFIG_USER_ONLY
2270 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2272 TCGv_i32 tmp;
2274 check_privileged(s);
2275 potential_page_fault(s);
2277 /* We pretend the format is RX_a so that D2 is the field we want. */
2278 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
2279 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
2280 tcg_temp_free_i32(tmp);
2281 return NO_EXIT;
2283 #endif
2285 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2287 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2288 return_low128(o->out);
2289 return NO_EXIT;
2292 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2294 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2295 return_low128(o->out);
2296 return NO_EXIT;
2299 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2301 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2302 return_low128(o->out);
2303 return NO_EXIT;
2306 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2308 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2309 return_low128(o->out);
2310 return NO_EXIT;
2313 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2315 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2316 return NO_EXIT;
2319 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2321 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2322 return NO_EXIT;
2325 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2327 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2328 return_low128(o->out2);
2329 return NO_EXIT;
2332 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2334 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2335 return NO_EXIT;
2338 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2340 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2341 tb->flags, (ab)use the tb->cs_base field as the address of
2342 the template in memory, and grab 8 bits of tb->flags/cflags for
2343 the contents of the register. We would then recognize all this
2344 in gen_intermediate_code_internal, generating code for exactly
2345 one instruction. This new TB then gets executed normally.
2347 On the other hand, this seems to be mostly used for modifying
2348 MVC inside of memcpy, which needs a helper call anyway. So
2349 perhaps this doesn't bear thinking about any further. */
2351 TCGv_i64 tmp;
2353 update_psw_addr(s);
2354 gen_op_calc_cc(s);
2356 tmp = tcg_const_i64(s->next_pc);
2357 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2358 tcg_temp_free_i64(tmp);
2360 set_cc_static(s);
2361 return NO_EXIT;
2364 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2366 int m3 = get_field(s->fields, m3);
2367 int pos, len, base = s->insn->data;
2368 TCGv_i64 tmp = tcg_temp_new_i64();
2369 uint64_t ccm;
2371 switch (m3) {
2372 case 0xf:
2373 /* Effectively a 32-bit load. */
2374 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2375 len = 32;
2376 goto one_insert;
2378 case 0xc:
2379 case 0x6:
2380 case 0x3:
2381 /* Effectively a 16-bit load. */
2382 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2383 len = 16;
2384 goto one_insert;
2386 case 0x8:
2387 case 0x4:
2388 case 0x2:
2389 case 0x1:
2390 /* Effectively an 8-bit load. */
2391 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2392 len = 8;
2393 goto one_insert;
2395 one_insert:
2396 pos = base + ctz32(m3) * 8;
2397 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2398 ccm = ((1ull << len) - 1) << pos;
2399 break;
2401 default:
2402 /* This is going to be a sequence of loads and inserts. */
2403 pos = base + 32 - 8;
2404 ccm = 0;
2405 while (m3) {
2406 if (m3 & 0x8) {
2407 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2408 tcg_gen_addi_i64(o->in2, o->in2, 1);
2409 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2410 ccm |= 0xff << pos;
2412 m3 = (m3 << 1) & 0xf;
2413 pos -= 8;
2415 break;
2418 tcg_gen_movi_i64(tmp, ccm);
2419 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2420 tcg_temp_free_i64(tmp);
2421 return NO_EXIT;
/* Insert immediate: deposit in2 into in1 at the bit position and width
   encoded in insn->data (low byte = shift, high bits = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* LOAD LENGTHENED short -> long BFP.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED long -> short BFP.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED extended -> long BFP; the 128-bit source is in in1/in2.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED extended -> short BFP; the 128-bit source is in in1/in2.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED long -> extended BFP; low half via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED short -> extended BFP; low half via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 33-63 of the source.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths; in2 holds the effective address.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from memory.
   Privileged; may fault, so the PC must be recoverable.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit).  Privileged.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS: translate the operand address; sets the CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PSW: load a short (two-word) PSW from memory and install it.
   Control flow never returns here, hence EXIT_NORETURN.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 via helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD MULTIPLE (32-bit): load words into the low halves of registers
   r1..r3, wrapping the register number mod 16.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address only between loads; in2 is clobbered.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE HIGH: as op_lm32, but into the high register halves.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE (64-bit): load doublewords directly into regs[].  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
/* Register move: hand the in2 temporary (and its "global" flag) over to
   out, clearing in2 so the generic cleanup does not free it twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* 128-bit move: transfer ownership of the in1/in2 pair to out/out2.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE (character): copy l1+1 bytes from the second operand to the
   first operand address, via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: register pairs r1/r2 describe the operands; sets the CC.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED; sets the CC.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space copy; regs[r1] holds
   the length (note the length field l1 names the register).  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: as op_mvcp, in the other direction.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
/* Integer multiply, low 64 bits of the product.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* 64x64 -> 128-bit multiply; low half returned via return_low128.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY, short BFP.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY, short sources producing a long BFP result.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY, long BFP.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY, extended BFP: 128-bit operand in out/out2, other in in1/in2.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY, 128-bit first operand (out/out2) by a long BFP (in2).  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD, short BFP: out = in1 * in2 + f(r3).  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD, long BFP.  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT, short BFP.  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT, long BFP.  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* LOAD NEGATIVE: out = -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* AND (character): storage-to-storage AND of l1+1 bytes; sets the CC.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Two's-complement negation.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* OR (character): storage-to-storage OR; sets the CC.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Bitwise OR.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR immediate into a sub-field of the register; insn->data encodes
   the field position (low byte) and width.  Shifts in2 in place, so it
   must not be a TCG global.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Byte-swap the low 16 bits.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap the low 32 bits.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap all 64 bits.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
/* ROTATE LEFT (32-bit): performed in 32-bit temporaries, then
   zero-extended back into the 64-bit output.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT, short BFP.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT, long BFP.  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT, extended BFP: 128-bit minuend in out/out2.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR: privileged inter-CPU signalling; sets the CC.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* SHIFT LEFT SINGLE (arithmetic); insn->data is the sign-bit index
   (31 for the 32-bit form, 63 for the 64-bit form).  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* CC is computed from the unshifted inputs.  */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* Logical shift left.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Arithmetic shift right.  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Logical shift right.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SET SYSTEM MASK: replace bits 0-7 of the PSW mask (bits 56-63 of the
   64-bit representation) with the byte in in2.  Privileged.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): store control registers r1..r3.  Privileged.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit).  Privileged.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE THEN AND/OR SYSTEM MASK: store the current system-mask byte,
   then AND (opcode 0xac, STNSM) or OR (STOSM) the immediate into it.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
#endif
/* Memory stores of various widths; in1 is the value, in2 the address.  */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE ACCESS MULTIPLE: store access registers r1..r3 via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK: store the register bytes selected by the
   m3 mask to successive memory locations.  insn->data is the bit offset
   of the affected 32-bit word within the register (0 for STCM/STCMY,
   32 for STCMH).  Contiguous masks become a single wider store.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least-significant selected byte.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) to memory;
   insn->data selects the element size (4 or 8 bytes).  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3117 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3119 int r1 = get_field(s->fields, r1);
3120 int r3 = get_field(s->fields, r3);
3121 TCGv_i64 t = tcg_temp_new_i64();
3122 TCGv_i64 t4 = tcg_const_i64(4);
3123 TCGv_i64 t32 = tcg_const_i64(32);
3125 while (1) {
3126 tcg_gen_shl_i64(t, regs[r1], t32);
3127 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3128 if (r1 == r3) {
3129 break;
3131 tcg_gen_add_i64(o->in2, o->in2, t4);
3132 r1 = (r1 + 1) & 15;
3135 tcg_temp_free_i64(t);
3136 tcg_temp_free_i64(t4);
3137 tcg_temp_free_i64(t32);
3138 return NO_EXIT;
/* Integer subtraction.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: implemented as in1 + ~in2 + borrow, where the
   incoming borrow is recovered from bit 1 of the current CC.  Modifies
   in2 in place, so it must not be a TCG global.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the SVC number and instruction length in the
   CPU state, then raise the SVC exception.  Never returns inline.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Make the PSW address and CC current before taking the exception.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
/* TEST DATA CLASS, short BFP: CC from the class of in1 under mask in2.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS, long BFP.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS, extended BFP: 128-bit value in the out/out2 pair.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: probe access to the first-operand address; sets CC.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif

/* TRANSLATE: replace l1+1 bytes at addr1 via the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* UNPACK: convert packed-decimal storage to zoned format via helper.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* EXCLUSIVE OR (character): storage-to-storage XOR; sets the CC.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Bitwise XOR.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR immediate into a sub-field of the register; insn->data encodes
   the field position (low byte) and width.  Shifts in2 in place, so it
   must not be a TCG global.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  Each simply records the
   relevant operands under the appropriate CC_OP_* computation mode.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits participate in the zero test.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREPeration" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

/* Fresh temporary for a single 64-bit result.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Fresh temporary pair for a 128-bit result.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Write directly to general register r1 (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Write directly to the even/odd register pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}

/* Write directly to floating-point register r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}

/* Write directly to the extended-FP register pair r1/r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

/* Full 64-bit store to general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Insert the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Insert the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}

/* Store to the low 32 bits of r1, leaving the high half intact.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Store out/out2 to the low halves of the pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split a 64-bit result across the low halves of the pair r1/r1+1:
   low word to r1+1, high word to r1.  Clobbers o->out.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Store a short (32-bit) FP result to register r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Store a long (64-bit) FP result to register r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}

/* Store an extended (128-bit) FP result to the pair r1/r1+2.  */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg((f1 + 2) & 15, o->out2);
}

/* Conditional stores used by "load on condition"-style patterns:
   only write back when r1 and r2 differ.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Stores of the result to memory at addr1, various widths.  */

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store the result to memory at the address in in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3576 /* ====================================================================== */
3577 /* The "INput 1" generators. These load the first operand to an insn. */
/* Load general register r1 into a fresh temporary.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* Alias general register r1 directly; g_in1 tells the cleanup pass in
   translate_one not to free this global TCG value.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Sign-extend the low 32 bits of r1 into a fresh temporary.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* Zero-extend the low 32 bits of r1 into a fresh temporary.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* Use the high 32 bits of r1, shifted down into the low half.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

/* Load the odd register of the r1 even/odd pair.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* Sign-extend the low 32 bits of the odd register of the r1 pair.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* Zero-extend the low 32 bits of the odd register of the r1 pair.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* Form a doubleword from the 32-bit halves of the even/odd r1 pair:
   r1 supplies the high half, r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}

/* Load general register r2 into a fresh temporary.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* Load general register r3 into a fresh temporary.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* Alias general register r3 directly (global; not freed).  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* Sign-extend the low 32 bits of r3 into a fresh temporary.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* Zero-extend the low 32 bits of r3 into a fresh temporary.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* Load the short (32-bit) FP operand from float register r1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* Alias float register r1 directly (global; not freed).  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Alias the extended-precision float register pair r1/r1+2.
   NOTE(review): despite being an "in1" helper this fills out/out2
   rather than in1 — presumably so the op can update the 128-bit pair
   in place; confirm against the insns that use x1_o.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
/* Compute the first-operand effective address base(b1)+disp(d1)
   into o->addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* Compute the second-operand effective address x2+b2+d2 into o->addr1
   (insns where the operand-2 address plays the operand-1 role).  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* Load an unsigned byte from the first-operand address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

/* Load a signed halfword from the first-operand address.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

/* Load an unsigned halfword from the first-operand address.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

/* Load a signed word from the first-operand address.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

/* Load an unsigned word from the first-operand address.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

/* Load a doubleword from the first-operand address.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3740 /* ====================================================================== */
3741 /* The "INput 2" generators. These load the second operand to an insn. */
/* Alias general register r1 as the second input (global; not freed).  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

/* Zero-extend the low 16 bits of r1.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

/* Zero-extend the low 32 bits of r1.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

/* Load general register r2 into a fresh temporary.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Alias general register r2 directly (global; not freed).  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Load r2 only when the field is non-zero; r2 == 0 leaves o->in2
   unset (TCGV_UNUSED) for insns where register 0 means "no operand".  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* Sign-extend the low 8 bits of r2.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Zero-extend the low 8 bits of r2.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Sign-extend the low 16 bits of r2.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Zero-extend the low 16 bits of r2.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load general register r3 into a fresh temporary.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* Sign-extend the low 32 bits of r2.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Zero-extend the low 32 bits of r2.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load the short (32-bit) FP operand from float register r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* Alias float register r2 directly (global; not freed).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Alias the extended-precision float register pair r2/r2+2 as
   in1/in2 (globals; not freed).  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r2 must be < 14.  */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}

/* Compute the second-operand effective address x2+b2+d2 into in2.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* PC-relative address: current PC plus the halfword-scaled signed
   immediate i2.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* Shift amount for 32-bit shifts (masked to 0..31 by the helper).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

/* Shift amount for 64-bit shifts (masked to 0..63 by the helper).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
/* The in2_m2_* helpers compute the second-operand address into o->in2
   and then overwrite that same temporary with the value loaded from it.
   The in2_mri2_* variants do the same with a PC-relative address.  */

/* Load an unsigned byte from the second-operand address.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

/* Load a signed halfword from the second-operand address.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

/* Load an unsigned halfword from the second-operand address.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* Load a signed word from the second-operand address.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* Load an unsigned word from the second-operand address.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* Load a doubleword from the second-operand address.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* Load an unsigned halfword from the PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* Load a signed word from the PC-relative address.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* Load an unsigned word from the PC-relative address.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* Load a doubleword from the PC-relative address.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
/* The immediate field i2, sign behavior as extracted by extract_field.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* The low 8 bits of i2, zero-extended.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* The low 16 bits of i2, zero-extended.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* The low 32 bits of i2, zero-extended.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* Unsigned 16-bit i2 shifted left by the per-insn data amount
   (s->insn->data), e.g. for the IILH/IIHH style insert family.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* Unsigned 32-bit i2 shifted left by the per-insn data amount.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
3954 /* ====================================================================== */
3956 /* Find opc within the table of insns. This is formulated as a switch
3957 statement so that (1) we get compile-time notice of cut-paste errors
3958 for duplicated opcodes, and (2) the compiler generates the binary
3959 search tree, rather than us having to post-process the table. */
/* C() is D() with a zero data field; both expand entries from
   insn-data.def, which is included three times below with different
   definitions of D to build (1) the enum of insn indexes, (2) the
   table of DisasInsn descriptors, and (3) the lookup switch.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* Pass 1: one enum constant per insn, used to index insn_info[].  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
enum DisasInsnEnum {
#include "insn-data.def"
};

/* Pass 2: the descriptor initializer for each insn.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Pass 3: map a 16-bit (major << 8 | minor) opcode to its descriptor.
   The switch makes the compiler diagnose duplicated opcodes and emit
   the binary search for us.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
4017 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4019 uint32_t r, m;
4021 if (f->size == 0) {
4022 return;
4025 /* Zero extract the field from the insn. */
4026 r = (insn << f->beg) >> (64 - f->size);
4028 /* Sign-extend, or un-swap the field as necessary. */
4029 switch (f->type) {
4030 case 0: /* unsigned */
4031 break;
4032 case 1: /* signed */
4033 assert(f->size <= 32);
4034 m = 1u << (f->size - 1);
4035 r = (r ^ m) - m;
4036 break;
4037 case 2: /* dl+dh split, signed 20 bit. */
4038 r = ((int8_t)r << 12) | (r >> 8);
4039 break;
4040 default:
4041 abort();
4044 /* Validate that the "compressed" encoding we selected above is valid.
4045 I.e. we havn't make two different original fields overlap. */
4046 assert(((o->presentC >> f->indexC) & 1) == 0);
4047 o->presentC |= 1 << f->indexC;
4048 o->presentO |= 1 << f->indexO;
4050 o->c[f->indexC] = r;
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The major opcode is the first byte; it also determines the
       instruction length (2, 4 or 6 bytes).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full instruction in the uint64_t, as expected by
       extract_field.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode.  */
        op2 = 0;
        break;
    default:
        /* Default: secondary opcode at bit 40.  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Translate a single instruction at s->pc: decode it, run the table-driven
   in/prep/op/wout/cout helper pipeline (or fall back to the legacy
   interpreter for insns not yet converted), free temporaries, and advance
   s->pc.  Returns the resulting ExitStatus for the translation loop.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC.  */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy DISAS_* result onto ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  The helper order is the contract:
       inputs, then prep, then the op itself, then writeback and
       condition-code computation.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias global TCG registers and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
/* Translate a whole TranslationBlock starting at tb->pc.  When SEARCH_PC
   is set, also record per-op PC/cc_op bookkeeping in tcg_ctx.gen_opc_*
   so that restore_state_to_opc can map a host PC back to a guest state.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record the guest state for each generated op so the PC
               search can restore it.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before translating an insn that has a breakpoint on it.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The TB epilogue was already emitted.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad the bookkeeping arrays out to the final op count.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4366 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4368 gen_intermediate_code_internal(env, tb, 0);
4371 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4373 gen_intermediate_code_internal(env, tb, 1);
4376 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4378 int cc_op;
4379 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4380 cc_op = gen_opc_cc_op[pc_pos];
4381 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4382 env->cc_op = cc_op;