target-s390: Convert FP LOAD COMPLEMENT, NEGATIVE, POSITIVE
[qemu-kvm.git] / target-s390x / translate.c
blobb6043d6701731a0be0c680358e065addd6faa848
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
/* Per-instruction translation state threaded through the decoder. */
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
/* Address of the current instruction and of the next sequential one. */
55 uint64_t pc, next_pc;
/* Lazily tracked method for computing the condition code (see gen_op_calc_cc). */
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 int is_jmp;
61 /* Information carried about a condition to be evaluated. */
62 typedef struct {
63 TCGCond cond:8;
/* True when the comparison operands below are 64-bit (u.s64), else 32-bit. */
64 bool is_64;
/* g1/g2: operand a/b is a global TCG value and must not be freed
   (see free_compare). */
65 bool g1;
66 bool g2;
67 union {
68 struct { TCGv_i64 a, b; } s64;
69 struct { TCGv_i32 a, b; } s32;
70 } u;
71 } DisasCompare;
73 #define DISAS_EXCP 4
75 static void gen_op_calc_cc(DisasContext *s);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
82 static inline void debug_insn(uint64_t insn)
84 LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
94 return pc;
/* Dump the PSW, condition-code state, general registers, floating-point
   registers (and, in system mode, control registers) to stream F. */
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
100 int i;
/* cc_op values above 3 are symbolic "how to compute cc" states, printed
   by name; 0..3 is an already-computed condition code. */
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
/* General registers, four per output row. */
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
/* Floating-point registers, four per output row. */
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
/* Control registers are only meaningful in system emulation. */
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
137 #endif
/* Optional statistics on inlined vs helper-computed branches. */
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
144 #endif
146 cpu_fprintf(f, "\n");
/* TCG globals mirroring architectural state in CPUS390XState;
   created once by s390x_translate_init(). */
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
/* Lazy condition-code machinery: cc_op names the pending computation,
   cc_src/cc_dst/cc_vr hold its operands. */
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
/* Names r0..r15 and f0..f15 for the 16 GPR and 16 FPR globals below. */
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
/* Per-opcode snapshot of cc_op, used when restoring state from a TB. */
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* Create all TCG global variables backing architectural state.
   Must run once before any translation. */
163 void s390x_translate_init(void)
165 int i;
167 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
168 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
169 offsetof(CPUS390XState, psw.addr),
170 "psw_addr");
171 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
172 offsetof(CPUS390XState, psw.mask),
173 "psw_mask");
175 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
176 "cc_op");
177 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
178 "cc_src");
179 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
180 "cc_dst");
181 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
182 "cc_vr");
/* General registers r0..r15. */
184 for (i = 0; i < 16; i++) {
185 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
186 regs[i] = tcg_global_mem_new(TCG_AREG0,
187 offsetof(CPUS390XState, regs[i]),
188 cpu_reg_names[i]);
/* Floating-point registers f0..f15; names live in the second half
   of cpu_reg_names. */
191 for (i = 0; i < 16; i++) {
192 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
193 fregs[i] = tcg_global_mem_new(TCG_AREG0,
194 offsetof(CPUS390XState, fregs[i].d),
195 cpu_reg_names[i + 16]);
198 /* register helpers */
199 #define GEN_HELPER 2
200 #include "helper.h"
203 static inline TCGv_i64 load_reg(int reg)
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
210 static inline TCGv_i64 load_freg(int reg)
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_mov_i64(r, fregs[reg]);
214 return r;
217 static inline TCGv_i32 load_freg32(int reg)
219 TCGv_i32 r = tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
222 #else
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
224 #endif
225 return r;
228 static inline TCGv_i64 load_freg32_i64(int reg)
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
235 static inline TCGv_i32 load_reg32(int reg)
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
239 return r;
242 static inline TCGv_i64 load_reg32_i64(int reg)
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
246 return r;
249 static inline void store_reg(int reg, TCGv_i64 v)
251 tcg_gen_mov_i64(regs[reg], v);
254 static inline void store_freg(int reg, TCGv_i64 v)
256 tcg_gen_mov_i64(fregs[reg], v);
259 static inline void store_reg32(int reg, TCGv_i32 v)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
264 #else
265 tcg_gen_deposit_i64(regs[reg], regs[reg],
266 MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
267 #endif
270 static inline void store_reg32_i64(int reg, TCGv_i64 v)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
276 static inline void store_reg32h_i64(int reg, TCGv_i64 v)
278 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
281 static inline void store_freg32(int reg, TCGv_i32 v)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
286 #else
287 tcg_gen_deposit_i64(fregs[reg], fregs[reg],
288 MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
289 #endif
292 static inline void store_freg32_i64(int reg, TCGv_i64 v)
294 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
297 static inline void return_low128(TCGv_i64 dest)
299 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
302 static inline void update_psw_addr(DisasContext *s)
304 /* psw.addr */
305 tcg_gen_movi_i64(psw_addr, s->pc);
308 static inline void potential_page_fault(DisasContext *s)
310 #ifndef CONFIG_USER_ONLY
311 update_psw_addr(s);
312 gen_op_calc_cc(s);
313 #endif
316 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
318 return (uint64_t)cpu_lduw_code(env, pc);
321 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
326 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
328 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
331 static inline int get_mem_index(DisasContext *s)
333 switch (s->tb->flags & FLAG_MASK_ASC) {
334 case PSW_ASC_PRIMARY >> 32:
335 return 0;
336 case PSW_ASC_SECONDARY >> 32:
337 return 1;
338 case PSW_ASC_HOME >> 32:
339 return 2;
340 default:
341 tcg_abort();
342 break;
346 static void gen_exception(int excp)
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
/* Raise a program interruption with interruption code CODE.  Records the
   code and instruction length in env, synchronizes pc and cc, triggers
   EXCP_PGM and marks the TB as ended. */
353 static void gen_program_exception(DisasContext *s, int code)
355 TCGv_i32 tmp;
357 /* Remember what pgm exception this was. */
358 tmp = tcg_const_i32(code);
359 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
360 tcg_temp_free_i32(tmp);
/* Instruction length code: distance from this insn to the next. */
362 tmp = tcg_const_i32(s->next_pc - s->pc);
363 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
364 tcg_temp_free_i32(tmp);
366 /* Advance past instruction. */
367 s->pc = s->next_pc;
368 update_psw_addr(s);
370 /* Save off cc. */
371 gen_op_calc_cc(s);
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM);
376 /* End TB here. */
377 s->is_jmp = DISAS_EXCP;
380 static inline void gen_illegal_opcode(DisasContext *s)
382 gen_program_exception(s, PGM_SPECIFICATION);
385 static inline void check_privileged(DisasContext *s)
387 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
388 gen_program_exception(s, PGM_PRIVILEGED);
392 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
394 TCGv_i64 tmp;
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s->tb->flags & FLAG_MASK_64)) {
398 d2 &= 0x7fffffffUL;
401 if (x2) {
402 if (d2) {
403 tmp = tcg_const_i64(d2);
404 tcg_gen_add_i64(tmp, tmp, regs[x2]);
405 } else {
406 tmp = load_reg(x2);
408 if (b2) {
409 tcg_gen_add_i64(tmp, tmp, regs[b2]);
411 } else if (b2) {
412 if (d2) {
413 tmp = tcg_const_i64(d2);
414 tcg_gen_add_i64(tmp, tmp, regs[b2]);
415 } else {
416 tmp = load_reg(b2);
418 } else {
419 tmp = tcg_const_i64(d2);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
424 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
427 return tmp;
430 static void gen_op_movi_cc(DisasContext *s, uint32_t val)
432 s->cc_op = CC_OP_CONST0 + val;
435 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
437 tcg_gen_discard_i64(cc_src);
438 tcg_gen_mov_i64(cc_dst, dst);
439 tcg_gen_discard_i64(cc_vr);
440 s->cc_op = op;
443 static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
445 tcg_gen_discard_i64(cc_src);
446 tcg_gen_extu_i32_i64(cc_dst, dst);
447 tcg_gen_discard_i64(cc_vr);
448 s->cc_op = op;
451 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
452 TCGv_i64 dst)
454 tcg_gen_mov_i64(cc_src, src);
455 tcg_gen_mov_i64(cc_dst, dst);
456 tcg_gen_discard_i64(cc_vr);
457 s->cc_op = op;
460 static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
461 TCGv_i32 dst)
463 tcg_gen_extu_i32_i64(cc_src, src);
464 tcg_gen_extu_i32_i64(cc_dst, dst);
465 tcg_gen_discard_i64(cc_vr);
466 s->cc_op = op;
469 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
470 TCGv_i64 dst, TCGv_i64 vr)
472 tcg_gen_mov_i64(cc_src, src);
473 tcg_gen_mov_i64(cc_dst, dst);
474 tcg_gen_mov_i64(cc_vr, vr);
475 s->cc_op = op;
478 static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
480 gen_op_update1_cc_i32(s, CC_OP_NZ, val);
483 static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
485 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
488 static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
489 enum cc_op cond)
491 gen_op_update2_cc_i32(s, cond, v1, v2);
494 static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
495 enum cc_op cond)
497 gen_op_update2_cc_i64(s, cond, v1, v2);
500 static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
502 cmp_32(s, v1, v2, CC_OP_LTGT_32);
505 static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
507 cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
510 static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
512 /* XXX optimize for the constant? put it in s? */
513 TCGv_i32 tmp = tcg_const_i32(v2);
514 cmp_32(s, v1, tmp, CC_OP_LTGT_32);
515 tcg_temp_free_i32(tmp);
518 static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
520 TCGv_i32 tmp = tcg_const_i32(v2);
521 cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
522 tcg_temp_free_i32(tmp);
525 static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
527 cmp_64(s, v1, v2, CC_OP_LTGT_64);
530 static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
532 cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
535 static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
537 TCGv_i64 tmp = tcg_const_i64(v2);
538 cmp_s64(s, v1, tmp);
539 tcg_temp_free_i64(tmp);
542 static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
544 TCGv_i64 tmp = tcg_const_i64(v2);
545 cmp_u64(s, v1, tmp);
546 tcg_temp_free_i64(tmp);
549 static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
551 gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
554 static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
556 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
559 /* CC value is in env->cc_op */
560 static inline void set_cc_static(DisasContext *s)
562 tcg_gen_discard_i64(cc_src);
563 tcg_gen_discard_i64(cc_dst);
564 tcg_gen_discard_i64(cc_vr);
565 s->cc_op = CC_OP_STATIC;
568 static inline void gen_op_set_cc_op(DisasContext *s)
570 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
571 tcg_gen_movi_i32(cc_op, s->cc_op);
575 static inline void gen_update_cc_op(DisasContext *s)
577 gen_op_set_cc_op(s);
580 /* calculates cc into cc_op */
581 static void gen_op_calc_cc(DisasContext *s)
583 TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
584 TCGv_i64 dummy = tcg_const_i64(0);
586 switch (s->cc_op) {
587 case CC_OP_CONST0:
588 case CC_OP_CONST1:
589 case CC_OP_CONST2:
590 case CC_OP_CONST3:
591 /* s->cc_op is the cc value */
592 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
593 break;
594 case CC_OP_STATIC:
595 /* env->cc_op already is the cc value */
596 break;
597 case CC_OP_NZ:
598 case CC_OP_ABS_64:
599 case CC_OP_NABS_64:
600 case CC_OP_ABS_32:
601 case CC_OP_NABS_32:
602 case CC_OP_LTGT0_32:
603 case CC_OP_LTGT0_64:
604 case CC_OP_COMP_32:
605 case CC_OP_COMP_64:
606 case CC_OP_NZ_F32:
607 case CC_OP_NZ_F64:
608 /* 1 argument */
609 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
610 break;
611 case CC_OP_ICM:
612 case CC_OP_LTGT_32:
613 case CC_OP_LTGT_64:
614 case CC_OP_LTUGTU_32:
615 case CC_OP_LTUGTU_64:
616 case CC_OP_TM_32:
617 case CC_OP_TM_64:
618 case CC_OP_SLA_32:
619 case CC_OP_SLA_64:
620 case CC_OP_NZ_F128:
621 /* 2 arguments */
622 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
623 break;
624 case CC_OP_ADD_64:
625 case CC_OP_ADDU_64:
626 case CC_OP_ADDC_64:
627 case CC_OP_SUB_64:
628 case CC_OP_SUBU_64:
629 case CC_OP_SUBB_64:
630 case CC_OP_ADD_32:
631 case CC_OP_ADDU_32:
632 case CC_OP_ADDC_32:
633 case CC_OP_SUB_32:
634 case CC_OP_SUBU_32:
635 case CC_OP_SUBB_32:
636 /* 3 arguments */
637 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
638 break;
639 case CC_OP_DYNAMIC:
640 /* unknown operation - assume 3 arguments and cc_op in env */
/* Note: cc_op is passed as both the destination and the op selector here,
   since the actual op value already lives in the cc_op global. */
641 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
642 break;
643 default:
644 tcg_abort();
647 tcg_temp_free_i32(local_cc_op);
648 tcg_temp_free_i64(dummy);
650 /* We now have cc in cc_op as constant */
651 set_cc_static(s);
654 static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
656 debug_insn(insn);
658 *r1 = (insn >> 4) & 0xf;
659 *r2 = insn & 0xf;
662 static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
663 int *x2, int *b2, int *d2)
665 debug_insn(insn);
667 *r1 = (insn >> 20) & 0xf;
668 *x2 = (insn >> 16) & 0xf;
669 *b2 = (insn >> 12) & 0xf;
670 *d2 = insn & 0xfff;
672 return get_address(s, *x2, *b2, *d2);
675 static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
676 int *b2, int *d2)
678 debug_insn(insn);
680 *r1 = (insn >> 20) & 0xf;
681 /* aka m3 */
682 *r3 = (insn >> 16) & 0xf;
683 *b2 = (insn >> 12) & 0xf;
684 *d2 = insn & 0xfff;
687 static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
688 int *b1, int *d1)
690 debug_insn(insn);
692 *i2 = (insn >> 16) & 0xff;
693 *b1 = (insn >> 12) & 0xf;
694 *d1 = insn & 0xfff;
696 return get_address(s, 0, *b1, *d1);
699 static int use_goto_tb(DisasContext *s, uint64_t dest)
701 /* NOTE: we handle the case where the TB spans two pages here */
702 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
703 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
704 && !s->singlestep_enabled
705 && !(s->tb->cflags & CF_LAST_IO));
708 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
710 gen_update_cc_op(s);
712 if (use_goto_tb(s, pc)) {
713 tcg_gen_goto_tb(tb_num);
714 tcg_gen_movi_i64(psw_addr, pc);
715 tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
716 } else {
717 /* jump to another page: currently not optimized */
718 tcg_gen_movi_i64(psw_addr, pc);
719 tcg_gen_exit_tb(0);
723 static inline void account_noninline_branch(DisasContext *s, int cc_op)
725 #ifdef DEBUG_INLINE_BRANCHES
726 inline_branch_miss[cc_op]++;
727 #endif
730 static inline void account_inline_branch(DisasContext *s, int cc_op)
732 #ifdef DEBUG_INLINE_BRANCHES
733 inline_branch_hit[cc_op]++;
734 #endif
737 /* Table of mask values to comparison codes, given a comparison as input.
738 For a true comparison CC=3 will never be set, but we treat this
739 conservatively for possible use when CC=3 indicates overflow. */
/* Indexed by the 4-bit branch mask; entries are signed conditions
   (disas_jcc converts to unsigned for LTUGTU ops). */
740 static const TCGCond ltgt_cond[16] = {
741 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
742 TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
743 TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
744 TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
745 TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
746 TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
747 TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
748 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
751 /* Table of mask values to comparison codes, given a logic op as input.
752 For such, only CC=0 and CC=1 should be possible. */
753 static const TCGCond nz_cond[16] = {
754 /* | | x | x */
755 TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
756 /* | NE | x | x */
757 TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
758 /* EQ | | x | x */
759 TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
760 /* EQ | NE | x | x */
761 TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
764 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
765 details required to generate a TCG comparison. */
766 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
768 TCGCond cond;
769 enum cc_op old_cc_op = s->cc_op;
/* Trivial masks: branch always / branch never need no real operands. */
771 if (mask == 15 || mask == 0) {
772 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
773 c->u.s32.a = cc_op;
774 c->u.s32.b = cc_op;
775 c->g1 = c->g2 = true;
776 c->is_64 = false;
777 return;
780 /* Find the TCG condition for the mask + cc op. */
781 switch (old_cc_op) {
782 case CC_OP_LTGT0_32:
783 case CC_OP_LTGT0_64:
784 case CC_OP_LTGT_32:
785 case CC_OP_LTGT_64:
786 cond = ltgt_cond[mask];
787 if (cond == TCG_COND_NEVER) {
788 goto do_dynamic;
790 account_inline_branch(s, old_cc_op);
791 break;
793 case CC_OP_LTUGTU_32:
794 case CC_OP_LTUGTU_64:
/* Same table as the signed case, converted to unsigned conditions. */
795 cond = tcg_unsigned_cond(ltgt_cond[mask]);
796 if (cond == TCG_COND_NEVER) {
797 goto do_dynamic;
799 account_inline_branch(s, old_cc_op);
800 break;
802 case CC_OP_NZ:
803 cond = nz_cond[mask];
804 if (cond == TCG_COND_NEVER) {
805 goto do_dynamic;
807 account_inline_branch(s, old_cc_op);
808 break;
810 case CC_OP_TM_32:
811 case CC_OP_TM_64:
/* TEST UNDER MASK: only "all tested bits zero" (cc 0) and its
   complement can be expressed as a single comparison. */
812 switch (mask) {
813 case 8:
814 cond = TCG_COND_EQ;
815 break;
816 case 4 | 2 | 1:
817 cond = TCG_COND_NE;
818 break;
819 default:
820 goto do_dynamic;
822 account_inline_branch(s, old_cc_op);
823 break;
825 case CC_OP_ICM:
826 switch (mask) {
827 case 8:
828 cond = TCG_COND_EQ;
829 break;
830 case 4 | 2 | 1:
831 case 4 | 2:
832 cond = TCG_COND_NE;
833 break;
834 default:
835 goto do_dynamic;
837 account_inline_branch(s, old_cc_op);
838 break;
840 default:
841 do_dynamic:
842 /* Calculate cc value. */
843 gen_op_calc_cc(s);
844 /* FALLTHRU */
846 case CC_OP_STATIC:
847 /* Jump based on CC. We'll load up the real cond below;
848 the assignment here merely avoids a compiler warning. */
849 account_noninline_branch(s, old_cc_op);
850 old_cc_op = CC_OP_STATIC;
851 cond = TCG_COND_NEVER;
852 break;
855 /* Load up the arguments of the comparison. */
856 c->is_64 = true;
857 c->g1 = c->g2 = false;
858 switch (old_cc_op) {
859 case CC_OP_LTGT0_32:
860 c->is_64 = false;
861 c->u.s32.a = tcg_temp_new_i32();
862 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
863 c->u.s32.b = tcg_const_i32(0);
864 break;
865 case CC_OP_LTGT_32:
866 case CC_OP_LTUGTU_32:
867 c->is_64 = false;
868 c->u.s32.a = tcg_temp_new_i32();
869 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
870 c->u.s32.b = tcg_temp_new_i32();
871 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
872 break;
874 case CC_OP_LTGT0_64:
875 case CC_OP_NZ:
876 c->u.s64.a = cc_dst;
877 c->u.s64.b = tcg_const_i64(0);
878 c->g1 = true;
879 break;
880 case CC_OP_LTGT_64:
881 case CC_OP_LTUGTU_64:
882 c->u.s64.a = cc_src;
883 c->u.s64.b = cc_dst;
884 c->g1 = c->g2 = true;
885 break;
887 case CC_OP_TM_32:
888 case CC_OP_TM_64:
889 case CC_OP_ICM:
890 c->u.s64.a = tcg_temp_new_i64();
891 c->u.s64.b = tcg_const_i64(0);
892 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
893 break;
/* cc already materialized in the cc_op global: translate the 4-bit
   mask into a comparison against the 0..3 cc value. */
895 case CC_OP_STATIC:
896 c->is_64 = false;
897 c->u.s32.a = cc_op;
898 c->g1 = true;
899 switch (mask) {
900 case 0x8 | 0x4 | 0x2: /* cc != 3 */
901 cond = TCG_COND_NE;
902 c->u.s32.b = tcg_const_i32(3);
903 break;
904 case 0x8 | 0x4 | 0x1: /* cc != 2 */
905 cond = TCG_COND_NE;
906 c->u.s32.b = tcg_const_i32(2);
907 break;
908 case 0x8 | 0x2 | 0x1: /* cc != 1 */
909 cond = TCG_COND_NE;
910 c->u.s32.b = tcg_const_i32(1);
911 break;
912 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
913 cond = TCG_COND_EQ;
914 c->g1 = false;
915 c->u.s32.a = tcg_temp_new_i32();
916 c->u.s32.b = tcg_const_i32(0);
917 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
918 break;
919 case 0x8 | 0x4: /* cc < 2 */
920 cond = TCG_COND_LTU;
921 c->u.s32.b = tcg_const_i32(2);
922 break;
923 case 0x8: /* cc == 0 */
924 cond = TCG_COND_EQ;
925 c->u.s32.b = tcg_const_i32(0);
926 break;
927 case 0x4 | 0x2 | 0x1: /* cc != 0 */
928 cond = TCG_COND_NE;
929 c->u.s32.b = tcg_const_i32(0);
930 break;
931 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
932 cond = TCG_COND_NE;
933 c->g1 = false;
934 c->u.s32.a = tcg_temp_new_i32();
935 c->u.s32.b = tcg_const_i32(0);
936 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
937 break;
938 case 0x4: /* cc == 1 */
939 cond = TCG_COND_EQ;
940 c->u.s32.b = tcg_const_i32(1);
941 break;
942 case 0x2 | 0x1: /* cc > 1 */
943 cond = TCG_COND_GTU;
944 c->u.s32.b = tcg_const_i32(1);
945 break;
946 case 0x2: /* cc == 2 */
947 cond = TCG_COND_EQ;
948 c->u.s32.b = tcg_const_i32(2);
949 break;
950 case 0x1: /* cc == 3 */
951 cond = TCG_COND_EQ;
952 c->u.s32.b = tcg_const_i32(3);
953 break;
954 default:
955 /* CC is masked by something else: (8 >> cc) & mask. */
956 cond = TCG_COND_NE;
957 c->g1 = false;
958 c->u.s32.a = tcg_const_i32(8);
959 c->u.s32.b = tcg_const_i32(0);
960 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
961 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
962 break;
964 break;
966 default:
967 abort();
969 c->cond = cond;
972 static void free_compare(DisasCompare *c)
974 if (!c->g1) {
975 if (c->is_64) {
976 tcg_temp_free_i64(c->u.s64.a);
977 } else {
978 tcg_temp_free_i32(c->u.s32.a);
981 if (!c->g2) {
982 if (c->is_64) {
983 tcg_temp_free_i64(c->u.s64.b);
984 } else {
985 tcg_temp_free_i32(c->u.s32.b);
990 static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
991 uint32_t insn)
993 TCGv_i64 tmp, tmp2, tmp3;
994 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
995 int r1, r2;
996 #ifndef CONFIG_USER_ONLY
997 int r3, d2, b2;
998 #endif
1000 r1 = (insn >> 4) & 0xf;
1001 r2 = insn & 0xf;
1003 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1005 switch (op) {
1006 case 0x22: /* IPM R1 [RRE] */
1007 tmp32_1 = tcg_const_i32(r1);
1008 gen_op_calc_cc(s);
1009 gen_helper_ipm(cpu_env, cc_op, tmp32_1);
1010 tcg_temp_free_i32(tmp32_1);
1011 break;
1012 case 0x41: /* CKSM R1,R2 [RRE] */
1013 tmp32_1 = tcg_const_i32(r1);
1014 tmp32_2 = tcg_const_i32(r2);
1015 potential_page_fault(s);
1016 gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
1017 tcg_temp_free_i32(tmp32_1);
1018 tcg_temp_free_i32(tmp32_2);
1019 gen_op_movi_cc(s, 0);
1020 break;
1021 case 0x4e: /* SAR R1,R2 [RRE] */
1022 tmp32_1 = load_reg32(r2);
1023 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
1024 tcg_temp_free_i32(tmp32_1);
1025 break;
1026 case 0x4f: /* EAR R1,R2 [RRE] */
1027 tmp32_1 = tcg_temp_new_i32();
1028 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
1029 store_reg32(r1, tmp32_1);
1030 tcg_temp_free_i32(tmp32_1);
1031 break;
1032 case 0x54: /* MVPG R1,R2 [RRE] */
1033 tmp = load_reg(0);
1034 tmp2 = load_reg(r1);
1035 tmp3 = load_reg(r2);
1036 potential_page_fault(s);
1037 gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
1038 tcg_temp_free_i64(tmp);
1039 tcg_temp_free_i64(tmp2);
1040 tcg_temp_free_i64(tmp3);
1041 /* XXX check CCO bit and set CC accordingly */
1042 gen_op_movi_cc(s, 0);
1043 break;
1044 case 0x55: /* MVST R1,R2 [RRE] */
1045 tmp32_1 = load_reg32(0);
1046 tmp32_2 = tcg_const_i32(r1);
1047 tmp32_3 = tcg_const_i32(r2);
1048 potential_page_fault(s);
1049 gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
1050 tcg_temp_free_i32(tmp32_1);
1051 tcg_temp_free_i32(tmp32_2);
1052 tcg_temp_free_i32(tmp32_3);
1053 gen_op_movi_cc(s, 1);
1054 break;
1055 case 0x5d: /* CLST R1,R2 [RRE] */
1056 tmp32_1 = load_reg32(0);
1057 tmp32_2 = tcg_const_i32(r1);
1058 tmp32_3 = tcg_const_i32(r2);
1059 potential_page_fault(s);
1060 gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1061 set_cc_static(s);
1062 tcg_temp_free_i32(tmp32_1);
1063 tcg_temp_free_i32(tmp32_2);
1064 tcg_temp_free_i32(tmp32_3);
1065 break;
1066 case 0x5e: /* SRST R1,R2 [RRE] */
1067 tmp32_1 = load_reg32(0);
1068 tmp32_2 = tcg_const_i32(r1);
1069 tmp32_3 = tcg_const_i32(r2);
1070 potential_page_fault(s);
1071 gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1072 set_cc_static(s);
1073 tcg_temp_free_i32(tmp32_1);
1074 tcg_temp_free_i32(tmp32_2);
1075 tcg_temp_free_i32(tmp32_3);
1076 break;
1078 #ifndef CONFIG_USER_ONLY
1079 case 0x02: /* STIDP D2(B2) [S] */
1080 /* Store CPU ID */
1081 check_privileged(s);
1082 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1083 tmp = get_address(s, 0, b2, d2);
1084 potential_page_fault(s);
1085 gen_helper_stidp(cpu_env, tmp);
1086 tcg_temp_free_i64(tmp);
1087 break;
1088 case 0x04: /* SCK D2(B2) [S] */
1089 /* Set Clock */
1090 check_privileged(s);
1091 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1092 tmp = get_address(s, 0, b2, d2);
1093 potential_page_fault(s);
1094 gen_helper_sck(cc_op, tmp);
1095 set_cc_static(s);
1096 tcg_temp_free_i64(tmp);
1097 break;
1098 case 0x05: /* STCK D2(B2) [S] */
1099 /* Store Clock */
1100 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1101 tmp = get_address(s, 0, b2, d2);
1102 potential_page_fault(s);
1103 gen_helper_stck(cc_op, cpu_env, tmp);
1104 set_cc_static(s);
1105 tcg_temp_free_i64(tmp);
1106 break;
1107 case 0x06: /* SCKC D2(B2) [S] */
1108 /* Set Clock Comparator */
1109 check_privileged(s);
1110 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1111 tmp = get_address(s, 0, b2, d2);
1112 potential_page_fault(s);
1113 gen_helper_sckc(cpu_env, tmp);
1114 tcg_temp_free_i64(tmp);
1115 break;
1116 case 0x07: /* STCKC D2(B2) [S] */
1117 /* Store Clock Comparator */
1118 check_privileged(s);
1119 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1120 tmp = get_address(s, 0, b2, d2);
1121 potential_page_fault(s);
1122 gen_helper_stckc(cpu_env, tmp);
1123 tcg_temp_free_i64(tmp);
1124 break;
1125 case 0x08: /* SPT D2(B2) [S] */
1126 /* Set CPU Timer */
1127 check_privileged(s);
1128 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1129 tmp = get_address(s, 0, b2, d2);
1130 potential_page_fault(s);
1131 gen_helper_spt(cpu_env, tmp);
1132 tcg_temp_free_i64(tmp);
1133 break;
1134 case 0x09: /* STPT D2(B2) [S] */
1135 /* Store CPU Timer */
1136 check_privileged(s);
1137 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1138 tmp = get_address(s, 0, b2, d2);
1139 potential_page_fault(s);
1140 gen_helper_stpt(cpu_env, tmp);
1141 tcg_temp_free_i64(tmp);
1142 break;
1143 case 0x0a: /* SPKA D2(B2) [S] */
1144 /* Set PSW Key from Address */
1145 check_privileged(s);
1146 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1147 tmp = get_address(s, 0, b2, d2);
1148 tmp2 = tcg_temp_new_i64();
1149 tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
1150 tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
1151 tcg_gen_or_i64(psw_mask, tmp2, tmp);
1152 tcg_temp_free_i64(tmp2);
1153 tcg_temp_free_i64(tmp);
1154 break;
1155 case 0x0d: /* PTLB [S] */
1156 /* Purge TLB */
1157 check_privileged(s);
1158 gen_helper_ptlb(cpu_env);
1159 break;
1160 case 0x10: /* SPX D2(B2) [S] */
1161 /* Set Prefix Register */
1162 check_privileged(s);
1163 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1164 tmp = get_address(s, 0, b2, d2);
1165 potential_page_fault(s);
1166 gen_helper_spx(cpu_env, tmp);
1167 tcg_temp_free_i64(tmp);
1168 break;
1169 case 0x11: /* STPX D2(B2) [S] */
1170 /* Store Prefix */
1171 check_privileged(s);
1172 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1173 tmp = get_address(s, 0, b2, d2);
1174 tmp2 = tcg_temp_new_i64();
1175 tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
1176 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1177 tcg_temp_free_i64(tmp);
1178 tcg_temp_free_i64(tmp2);
1179 break;
1180 case 0x12: /* STAP D2(B2) [S] */
1181 /* Store CPU Address */
1182 check_privileged(s);
1183 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1184 tmp = get_address(s, 0, b2, d2);
1185 tmp2 = tcg_temp_new_i64();
1186 tmp32_1 = tcg_temp_new_i32();
1187 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
1188 tcg_gen_extu_i32_i64(tmp2, tmp32_1);
1189 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1190 tcg_temp_free_i64(tmp);
1191 tcg_temp_free_i64(tmp2);
1192 tcg_temp_free_i32(tmp32_1);
1193 break;
1194 case 0x21: /* IPTE R1,R2 [RRE] */
1195 /* Invalidate PTE */
1196 check_privileged(s);
1197 r1 = (insn >> 4) & 0xf;
1198 r2 = insn & 0xf;
1199 tmp = load_reg(r1);
1200 tmp2 = load_reg(r2);
1201 gen_helper_ipte(cpu_env, tmp, tmp2);
1202 tcg_temp_free_i64(tmp);
1203 tcg_temp_free_i64(tmp2);
1204 break;
1205 case 0x29: /* ISKE R1,R2 [RRE] */
1206 /* Insert Storage Key Extended */
1207 check_privileged(s);
1208 r1 = (insn >> 4) & 0xf;
1209 r2 = insn & 0xf;
1210 tmp = load_reg(r2);
1211 tmp2 = tcg_temp_new_i64();
1212 gen_helper_iske(tmp2, cpu_env, tmp);
1213 store_reg(r1, tmp2);
1214 tcg_temp_free_i64(tmp);
1215 tcg_temp_free_i64(tmp2);
1216 break;
1217 case 0x2a: /* RRBE R1,R2 [RRE] */
1218 /* Set Storage Key Extended */
1219 check_privileged(s);
1220 r1 = (insn >> 4) & 0xf;
1221 r2 = insn & 0xf;
1222 tmp32_1 = load_reg32(r1);
1223 tmp = load_reg(r2);
1224 gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
1225 set_cc_static(s);
1226 tcg_temp_free_i32(tmp32_1);
1227 tcg_temp_free_i64(tmp);
1228 break;
1229 case 0x2b: /* SSKE R1,R2 [RRE] */
1230 /* Set Storage Key Extended */
1231 check_privileged(s);
1232 r1 = (insn >> 4) & 0xf;
1233 r2 = insn & 0xf;
1234 tmp32_1 = load_reg32(r1);
1235 tmp = load_reg(r2);
1236 gen_helper_sske(cpu_env, tmp32_1, tmp);
1237 tcg_temp_free_i32(tmp32_1);
1238 tcg_temp_free_i64(tmp);
1239 break;
1240 case 0x34: /* STCH ? */
1241 /* Store Subchannel */
1242 check_privileged(s);
1243 gen_op_movi_cc(s, 3);
1244 break;
1245 case 0x46: /* STURA R1,R2 [RRE] */
1246 /* Store Using Real Address */
1247 check_privileged(s);
1248 r1 = (insn >> 4) & 0xf;
1249 r2 = insn & 0xf;
1250 tmp32_1 = load_reg32(r1);
1251 tmp = load_reg(r2);
1252 potential_page_fault(s);
1253 gen_helper_stura(cpu_env, tmp, tmp32_1);
1254 tcg_temp_free_i32(tmp32_1);
1255 tcg_temp_free_i64(tmp);
1256 break;
1257 case 0x50: /* CSP R1,R2 [RRE] */
1258 /* Compare And Swap And Purge */
1259 check_privileged(s);
1260 r1 = (insn >> 4) & 0xf;
1261 r2 = insn & 0xf;
1262 tmp32_1 = tcg_const_i32(r1);
1263 tmp32_2 = tcg_const_i32(r2);
1264 gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
1265 set_cc_static(s);
1266 tcg_temp_free_i32(tmp32_1);
1267 tcg_temp_free_i32(tmp32_2);
1268 break;
1269 case 0x5f: /* CHSC ? */
1270 /* Channel Subsystem Call */
1271 check_privileged(s);
1272 gen_op_movi_cc(s, 3);
1273 break;
1274 case 0x78: /* STCKE D2(B2) [S] */
1275 /* Store Clock Extended */
1276 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1277 tmp = get_address(s, 0, b2, d2);
1278 potential_page_fault(s);
1279 gen_helper_stcke(cc_op, cpu_env, tmp);
1280 set_cc_static(s);
1281 tcg_temp_free_i64(tmp);
1282 break;
1283 case 0x79: /* SACF D2(B2) [S] */
1284 /* Set Address Space Control Fast */
1285 check_privileged(s);
1286 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1287 tmp = get_address(s, 0, b2, d2);
1288 potential_page_fault(s);
1289 gen_helper_sacf(cpu_env, tmp);
1290 tcg_temp_free_i64(tmp);
1291 /* addressing mode has changed, so end the block */
1292 s->pc = s->next_pc;
1293 update_psw_addr(s);
1294 s->is_jmp = DISAS_JUMP;
1295 break;
1296 case 0x7d: /* STSI D2,(B2) [S] */
1297 check_privileged(s);
1298 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1299 tmp = get_address(s, 0, b2, d2);
1300 tmp32_1 = load_reg32(0);
1301 tmp32_2 = load_reg32(1);
1302 potential_page_fault(s);
1303 gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
1304 set_cc_static(s);
1305 tcg_temp_free_i64(tmp);
1306 tcg_temp_free_i32(tmp32_1);
1307 tcg_temp_free_i32(tmp32_2);
1308 break;
1309 case 0x9d: /* LFPC D2(B2) [S] */
1310 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1311 tmp = get_address(s, 0, b2, d2);
1312 tmp2 = tcg_temp_new_i64();
1313 tmp32_1 = tcg_temp_new_i32();
1314 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
1315 tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
1316 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1317 tcg_temp_free_i64(tmp);
1318 tcg_temp_free_i64(tmp2);
1319 tcg_temp_free_i32(tmp32_1);
1320 break;
1321 case 0xb1: /* STFL D2(B2) [S] */
1322 /* Store Facility List (CPU features) at 200 */
1323 check_privileged(s);
1324 tmp2 = tcg_const_i64(0xc0000000);
1325 tmp = tcg_const_i64(200);
1326 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1327 tcg_temp_free_i64(tmp2);
1328 tcg_temp_free_i64(tmp);
1329 break;
1330 case 0xb2: /* LPSWE D2(B2) [S] */
1331 /* Load PSW Extended */
1332 check_privileged(s);
1333 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1334 tmp = get_address(s, 0, b2, d2);
1335 tmp2 = tcg_temp_new_i64();
1336 tmp3 = tcg_temp_new_i64();
1337 tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
1338 tcg_gen_addi_i64(tmp, tmp, 8);
1339 tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
1340 gen_helper_load_psw(cpu_env, tmp2, tmp3);
1341 /* we need to keep cc_op intact */
1342 s->is_jmp = DISAS_JUMP;
1343 tcg_temp_free_i64(tmp);
1344 tcg_temp_free_i64(tmp2);
1345 tcg_temp_free_i64(tmp3);
1346 break;
1347 case 0x20: /* SERVC R1,R2 [RRE] */
1348 /* SCLP Service call (PV hypercall) */
1349 check_privileged(s);
1350 potential_page_fault(s);
1351 tmp32_1 = load_reg32(r2);
1352 tmp = load_reg(r1);
1353 gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
1354 set_cc_static(s);
1355 tcg_temp_free_i32(tmp32_1);
1356 tcg_temp_free_i64(tmp);
1357 break;
1358 #endif
1359 default:
1360 LOG_DISAS("illegal b2 operation 0x%x\n", op);
1361 gen_illegal_opcode(s);
1362 break;
1366 static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
1367 int r1, int r2)
1369 TCGv_i64 tmp;
1370 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
1371 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
1372 #define FP_HELPER(i) \
1373 tmp32_1 = tcg_const_i32(r1); \
1374 tmp32_2 = tcg_const_i32(r2); \
1375 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1376 tcg_temp_free_i32(tmp32_1); \
1377 tcg_temp_free_i32(tmp32_2);
1379 #define FP_HELPER_CC(i) \
1380 tmp32_1 = tcg_const_i32(r1); \
1381 tmp32_2 = tcg_const_i32(r2); \
1382 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1383 set_cc_static(s); \
1384 tcg_temp_free_i32(tmp32_1); \
1385 tcg_temp_free_i32(tmp32_2);
1387 switch (op) {
1388 case 0x15: /* SQBDR R1,R2 [RRE] */
1389 FP_HELPER(sqdbr);
1390 break;
1391 case 0x74: /* LZER R1 [RRE] */
1392 tmp32_1 = tcg_const_i32(r1);
1393 gen_helper_lzer(cpu_env, tmp32_1);
1394 tcg_temp_free_i32(tmp32_1);
1395 break;
1396 case 0x75: /* LZDR R1 [RRE] */
1397 tmp32_1 = tcg_const_i32(r1);
1398 gen_helper_lzdr(cpu_env, tmp32_1);
1399 tcg_temp_free_i32(tmp32_1);
1400 break;
1401 case 0x76: /* LZXR R1 [RRE] */
1402 tmp32_1 = tcg_const_i32(r1);
1403 gen_helper_lzxr(cpu_env, tmp32_1);
1404 tcg_temp_free_i32(tmp32_1);
1405 break;
1406 case 0x84: /* SFPC R1 [RRE] */
1407 tmp32_1 = load_reg32(r1);
1408 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1409 tcg_temp_free_i32(tmp32_1);
1410 break;
1411 case 0x94: /* CEFBR R1,R2 [RRE] */
1412 case 0x95: /* CDFBR R1,R2 [RRE] */
1413 case 0x96: /* CXFBR R1,R2 [RRE] */
1414 tmp32_1 = tcg_const_i32(r1);
1415 tmp32_2 = load_reg32(r2);
1416 switch (op) {
1417 case 0x94:
1418 gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
1419 break;
1420 case 0x95:
1421 gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
1422 break;
1423 case 0x96:
1424 gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
1425 break;
1426 default:
1427 tcg_abort();
1429 tcg_temp_free_i32(tmp32_1);
1430 tcg_temp_free_i32(tmp32_2);
1431 break;
1432 case 0x98: /* CFEBR R1,R2 [RRE] */
1433 case 0x99: /* CFDBR R1,R2 [RRE] */
1434 case 0x9a: /* CFXBR R1,R2 [RRE] */
1435 tmp32_1 = tcg_const_i32(r1);
1436 tmp32_2 = tcg_const_i32(r2);
1437 tmp32_3 = tcg_const_i32(m3);
1438 switch (op) {
1439 case 0x98:
1440 gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1441 break;
1442 case 0x99:
1443 gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1444 break;
1445 case 0x9a:
1446 gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1447 break;
1448 default:
1449 tcg_abort();
1451 set_cc_static(s);
1452 tcg_temp_free_i32(tmp32_1);
1453 tcg_temp_free_i32(tmp32_2);
1454 tcg_temp_free_i32(tmp32_3);
1455 break;
1456 case 0xa4: /* CEGBR R1,R2 [RRE] */
1457 case 0xa5: /* CDGBR R1,R2 [RRE] */
1458 tmp32_1 = tcg_const_i32(r1);
1459 tmp = load_reg(r2);
1460 switch (op) {
1461 case 0xa4:
1462 gen_helper_cegbr(cpu_env, tmp32_1, tmp);
1463 break;
1464 case 0xa5:
1465 gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
1466 break;
1467 default:
1468 tcg_abort();
1470 tcg_temp_free_i32(tmp32_1);
1471 tcg_temp_free_i64(tmp);
1472 break;
1473 case 0xa6: /* CXGBR R1,R2 [RRE] */
1474 tmp32_1 = tcg_const_i32(r1);
1475 tmp = load_reg(r2);
1476 gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
1477 tcg_temp_free_i32(tmp32_1);
1478 tcg_temp_free_i64(tmp);
1479 break;
1480 case 0xa8: /* CGEBR R1,R2 [RRE] */
1481 tmp32_1 = tcg_const_i32(r1);
1482 tmp32_2 = tcg_const_i32(r2);
1483 tmp32_3 = tcg_const_i32(m3);
1484 gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1485 set_cc_static(s);
1486 tcg_temp_free_i32(tmp32_1);
1487 tcg_temp_free_i32(tmp32_2);
1488 tcg_temp_free_i32(tmp32_3);
1489 break;
1490 case 0xa9: /* CGDBR R1,R2 [RRE] */
1491 tmp32_1 = tcg_const_i32(r1);
1492 tmp32_2 = tcg_const_i32(r2);
1493 tmp32_3 = tcg_const_i32(m3);
1494 gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1495 set_cc_static(s);
1496 tcg_temp_free_i32(tmp32_1);
1497 tcg_temp_free_i32(tmp32_2);
1498 tcg_temp_free_i32(tmp32_3);
1499 break;
1500 case 0xaa: /* CGXBR R1,R2 [RRE] */
1501 tmp32_1 = tcg_const_i32(r1);
1502 tmp32_2 = tcg_const_i32(r2);
1503 tmp32_3 = tcg_const_i32(m3);
1504 gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1505 set_cc_static(s);
1506 tcg_temp_free_i32(tmp32_1);
1507 tcg_temp_free_i32(tmp32_2);
1508 tcg_temp_free_i32(tmp32_3);
1509 break;
1510 default:
1511 LOG_DISAS("illegal b3 operation 0x%x\n", op);
1512 gen_illegal_opcode(s);
1513 break;
1516 #undef FP_HELPER_CC
1517 #undef FP_HELPER
1520 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
1521 int r2)
1523 TCGv_i64 tmp;
1524 TCGv_i32 tmp32_1;
1526 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1527 switch (op) {
1528 case 0x83: /* FLOGR R1,R2 [RRE] */
1529 tmp = load_reg(r2);
1530 tmp32_1 = tcg_const_i32(r1);
1531 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
1532 set_cc_static(s);
1533 tcg_temp_free_i64(tmp);
1534 tcg_temp_free_i32(tmp32_1);
1535 break;
1536 default:
1537 LOG_DISAS("illegal b9 operation 0x%x\n", op);
1538 gen_illegal_opcode(s);
1539 break;
1543 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1545 unsigned char opc;
1546 uint64_t insn;
1547 int op, r1, r2, r3;
1549 opc = cpu_ldub_code(env, s->pc);
1550 LOG_DISAS("opc 0x%x\n", opc);
1552 switch (opc) {
1553 case 0xb2:
1554 insn = ld_code4(env, s->pc);
1555 op = (insn >> 16) & 0xff;
1556 disas_b2(env, s, op, insn);
1557 break;
1558 case 0xb3:
1559 insn = ld_code4(env, s->pc);
1560 op = (insn >> 16) & 0xff;
1561 r3 = (insn >> 12) & 0xf; /* aka m3 */
1562 r1 = (insn >> 4) & 0xf;
1563 r2 = insn & 0xf;
1564 disas_b3(env, s, op, r3, r1, r2);
1565 break;
1566 case 0xb9:
1567 insn = ld_code4(env, s->pc);
1568 r1 = (insn >> 4) & 0xf;
1569 r2 = insn & 0xf;
1570 op = (insn >> 16) & 0xff;
1571 disas_b9(env, s, op, r1, r2);
1572 break;
1573 default:
1574 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1575 gen_illegal_opcode(s);
1576 break;
1580 /* ====================================================================== */
1581 /* Define the insn format enumeration. */
1582 #define F0(N) FMT_##N,
1583 #define F1(N, X1) F0(N)
1584 #define F2(N, X1, X2) F0(N)
1585 #define F3(N, X1, X2, X3) F0(N)
1586 #define F4(N, X1, X2, X3, X4) F0(N)
1587 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1589 typedef enum {
1590 #include "insn-format.def"
1591 } DisasFormat;
1593 #undef F0
1594 #undef F1
1595 #undef F2
1596 #undef F3
1597 #undef F4
1598 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* Original (per-format) field indices, used for the presence bitmap.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact storage slots; fields sharing a value never coexist in one
   instruction format.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    unsigned op:8;           /* first opcode byte */
    unsigned op2:8;          /* second opcode byte, if any */
    unsigned presentC:16;    /* bitmap over compact slots */
    unsigned int presentO;   /* bitmap over original field indices */
    int c[NUM_C_FIELD];      /* decoded field values, compact-indexed */
};
1668 /* This is the way fields are to be accessed out of DisasFields. */
1669 #define have_field(S, F) have_field1((S), FLD_O_##F)
1670 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1672 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1674 return (f->presentO >> c) & 1;
1677 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1678 enum DisasFieldIndexC c)
1680 assert(have_field1(f, o));
1681 return f->c[c];
1684 /* Describe the layout of each field in each format. */
1685 typedef struct DisasField {
1686 unsigned int beg:8;
1687 unsigned int size:8;
1688 unsigned int type:2;
1689 unsigned int indexC:6;
1690 enum DisasFieldIndexO indexO:8;
1691 } DisasField;
1693 typedef struct DisasFormatInfo {
1694 DisasField op[NUM_C_FIELD];
1695 } DisasFormatInfo;
1697 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1698 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1699 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1700 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1701 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1702 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1703 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1704 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1705 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1706 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1707 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1708 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1709 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1710 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1712 #define F0(N) { { } },
1713 #define F1(N, X1) { { X1 } },
1714 #define F2(N, X1, X2) { { X1, X2 } },
1715 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1716 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1717 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1719 static const DisasFormatInfo format_info[] = {
1720 #include "insn-format.def"
1723 #undef F0
1724 #undef F1
1725 #undef F2
1726 #undef F3
1727 #undef F4
1728 #undef F5
1729 #undef R
1730 #undef M
1731 #undef BD
1732 #undef BXD
1733 #undef BDL
1734 #undef BXDL
1735 #undef I
1736 #undef L
1738 /* Generally, we'll extract operands into this structures, operate upon
1739 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1740 of routines below for more details. */
1741 typedef struct {
1742 bool g_out, g_out2, g_in1, g_in2;
1743 TCGv_i64 out, out2, in1, in2;
1744 TCGv_i64 addr1;
1745 } DisasOps;
1747 /* Return values from translate_one, indicating the state of the TB. */
1748 typedef enum {
1749 /* Continue the TB. */
1750 NO_EXIT,
1751 /* We have emitted one or more goto_tb. No fixup required. */
1752 EXIT_GOTO_TB,
1753 /* We are not using a goto_tb (for whatever reason), but have updated
1754 the PC (for whatever reason), so there's no need to do it again on
1755 exiting the TB. */
1756 EXIT_PC_UPDATED,
1757 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1758 updated the PC for the next instruction to be executed. */
1759 EXIT_PC_STALE,
1760 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1761 No following code will be executed. */
1762 EXIT_NORETURN,
1763 } ExitStatus;
1765 typedef enum DisasFacility {
1766 FAC_Z, /* zarch (default) */
1767 FAC_CASS, /* compare and swap and store */
1768 FAC_CASS2, /* compare and swap and store 2*/
1769 FAC_DFP, /* decimal floating point */
1770 FAC_DFPR, /* decimal floating point rounding */
1771 FAC_DO, /* distinct operands */
1772 FAC_EE, /* execute extensions */
1773 FAC_EI, /* extended immediate */
1774 FAC_FPE, /* floating point extension */
1775 FAC_FPSSH, /* floating point support sign handling */
1776 FAC_FPRGR, /* FPR-GR transfer */
1777 FAC_GIE, /* general instructions extension */
1778 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1779 FAC_HW, /* high-word */
1780 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1781 FAC_LOC, /* load/store on condition */
1782 FAC_LD, /* long displacement */
1783 FAC_PC, /* population count */
1784 FAC_SCF, /* store clock fast */
1785 FAC_SFLE, /* store facility list extended */
1786 } DisasFacility;
1788 struct DisasInsn {
1789 unsigned opc:16;
1790 DisasFormat fmt:6;
1791 DisasFacility fac:6;
1793 const char *name;
1795 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1796 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1797 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1798 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1799 void (*help_cout)(DisasContext *, DisasOps *);
1800 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1802 uint64_t data;
1805 /* ====================================================================== */
1806 /* Miscellaneous helpers, used by several operations. */
1808 static void help_l2_shift(DisasContext *s, DisasFields *f,
1809 DisasOps *o, int mask)
1811 int b2 = get_field(f, b2);
1812 int d2 = get_field(f, d2);
1814 if (b2 == 0) {
1815 o->in2 = tcg_const_i64(d2 & mask);
1816 } else {
1817 o->in2 = get_address(s, 0, b2, d2);
1818 tcg_gen_andi_i64(o->in2, o->in2, mask);
1822 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1824 if (dest == s->next_pc) {
1825 return NO_EXIT;
1827 if (use_goto_tb(s, dest)) {
1828 gen_update_cc_op(s);
1829 tcg_gen_goto_tb(0);
1830 tcg_gen_movi_i64(psw_addr, dest);
1831 tcg_gen_exit_tb((tcg_target_long)s->tb);
1832 return EXIT_GOTO_TB;
1833 } else {
1834 tcg_gen_movi_i64(psw_addr, dest);
1835 return EXIT_PC_UPDATED;
1839 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1840 bool is_imm, int imm, TCGv_i64 cdest)
1842 ExitStatus ret;
1843 uint64_t dest = s->pc + 2 * imm;
1844 int lab;
1846 /* Take care of the special cases first. */
1847 if (c->cond == TCG_COND_NEVER) {
1848 ret = NO_EXIT;
1849 goto egress;
1851 if (is_imm) {
1852 if (dest == s->next_pc) {
1853 /* Branch to next. */
1854 ret = NO_EXIT;
1855 goto egress;
1857 if (c->cond == TCG_COND_ALWAYS) {
1858 ret = help_goto_direct(s, dest);
1859 goto egress;
1861 } else {
1862 if (TCGV_IS_UNUSED_I64(cdest)) {
1863 /* E.g. bcr %r0 -> no branch. */
1864 ret = NO_EXIT;
1865 goto egress;
1867 if (c->cond == TCG_COND_ALWAYS) {
1868 tcg_gen_mov_i64(psw_addr, cdest);
1869 ret = EXIT_PC_UPDATED;
1870 goto egress;
1874 if (use_goto_tb(s, s->next_pc)) {
1875 if (is_imm && use_goto_tb(s, dest)) {
1876 /* Both exits can use goto_tb. */
1877 gen_update_cc_op(s);
1879 lab = gen_new_label();
1880 if (c->is_64) {
1881 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1882 } else {
1883 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1886 /* Branch not taken. */
1887 tcg_gen_goto_tb(0);
1888 tcg_gen_movi_i64(psw_addr, s->next_pc);
1889 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1891 /* Branch taken. */
1892 gen_set_label(lab);
1893 tcg_gen_goto_tb(1);
1894 tcg_gen_movi_i64(psw_addr, dest);
1895 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
1897 ret = EXIT_GOTO_TB;
1898 } else {
1899 /* Fallthru can use goto_tb, but taken branch cannot. */
1900 /* Store taken branch destination before the brcond. This
1901 avoids having to allocate a new local temp to hold it.
1902 We'll overwrite this in the not taken case anyway. */
1903 if (!is_imm) {
1904 tcg_gen_mov_i64(psw_addr, cdest);
1907 lab = gen_new_label();
1908 if (c->is_64) {
1909 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1910 } else {
1911 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1914 /* Branch not taken. */
1915 gen_update_cc_op(s);
1916 tcg_gen_goto_tb(0);
1917 tcg_gen_movi_i64(psw_addr, s->next_pc);
1918 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1920 gen_set_label(lab);
1921 if (is_imm) {
1922 tcg_gen_movi_i64(psw_addr, dest);
1924 ret = EXIT_PC_UPDATED;
1926 } else {
1927 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1928 Most commonly we're single-stepping or some other condition that
1929 disables all use of goto_tb. Just update the PC and exit. */
1931 TCGv_i64 next = tcg_const_i64(s->next_pc);
1932 if (is_imm) {
1933 cdest = tcg_const_i64(dest);
1936 if (c->is_64) {
1937 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1938 cdest, next);
1939 } else {
1940 TCGv_i32 t0 = tcg_temp_new_i32();
1941 TCGv_i64 t1 = tcg_temp_new_i64();
1942 TCGv_i64 z = tcg_const_i64(0);
1943 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1944 tcg_gen_extu_i32_i64(t1, t0);
1945 tcg_temp_free_i32(t0);
1946 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1947 tcg_temp_free_i64(t1);
1948 tcg_temp_free_i64(z);
1951 if (is_imm) {
1952 tcg_temp_free_i64(cdest);
1954 tcg_temp_free_i64(next);
1956 ret = EXIT_PC_UPDATED;
1959 egress:
1960 free_compare(c);
1961 return ret;
1964 /* ====================================================================== */
1965 /* The operations. These perform the bulk of the work for any insn,
1966 usually after the operands have been loaded and output initialized. */
1968 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1970 gen_helper_abs_i64(o->out, o->in2);
1971 return NO_EXIT;
1974 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1976 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1977 return NO_EXIT;
1980 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1982 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1983 return NO_EXIT;
1986 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1988 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1989 tcg_gen_mov_i64(o->out2, o->in2);
1990 return NO_EXIT;
1993 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1995 tcg_gen_add_i64(o->out, o->in1, o->in2);
1996 return NO_EXIT;
1999 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
2001 TCGv_i64 cc;
2003 tcg_gen_add_i64(o->out, o->in1, o->in2);
2005 /* XXX possible optimization point */
2006 gen_op_calc_cc(s);
2007 cc = tcg_temp_new_i64();
2008 tcg_gen_extu_i32_i64(cc, cc_op);
2009 tcg_gen_shri_i64(cc, cc, 1);
2011 tcg_gen_add_i64(o->out, o->out, cc);
2012 tcg_temp_free_i64(cc);
2013 return NO_EXIT;
2016 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
2018 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
2019 return NO_EXIT;
2022 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
2024 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
2025 return NO_EXIT;
2028 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
2030 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2031 return_low128(o->out2);
2032 return NO_EXIT;
2035 static ExitStatus op_and(DisasContext *s, DisasOps *o)
2037 tcg_gen_and_i64(o->out, o->in1, o->in2);
2038 return NO_EXIT;
2041 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
2043 int shift = s->insn->data & 0xff;
2044 int size = s->insn->data >> 8;
2045 uint64_t mask = ((1ull << size) - 1) << shift;
2047 assert(!o->g_in2);
2048 tcg_gen_shli_i64(o->in2, o->in2, shift);
2049 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2050 tcg_gen_and_i64(o->out, o->in1, o->in2);
2052 /* Produce the CC from only the bits manipulated. */
2053 tcg_gen_andi_i64(cc_dst, o->out, mask);
2054 set_cc_nz_u64(s, cc_dst);
2055 return NO_EXIT;
2058 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
2060 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2061 if (!TCGV_IS_UNUSED_I64(o->in2)) {
2062 tcg_gen_mov_i64(psw_addr, o->in2);
2063 return EXIT_PC_UPDATED;
2064 } else {
2065 return NO_EXIT;
2069 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
2071 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2072 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
2075 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
2077 int m1 = get_field(s->fields, m1);
2078 bool is_imm = have_field(s->fields, i2);
2079 int imm = is_imm ? get_field(s->fields, i2) : 0;
2080 DisasCompare c;
2082 disas_jcc(s, &c, m1);
2083 return help_branch(s, &c, is_imm, imm, o->in2);
2086 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
2088 int r1 = get_field(s->fields, r1);
2089 bool is_imm = have_field(s->fields, i2);
2090 int imm = is_imm ? get_field(s->fields, i2) : 0;
2091 DisasCompare c;
2092 TCGv_i64 t;
2094 c.cond = TCG_COND_NE;
2095 c.is_64 = false;
2096 c.g1 = false;
2097 c.g2 = false;
2099 t = tcg_temp_new_i64();
2100 tcg_gen_subi_i64(t, regs[r1], 1);
2101 store_reg32_i64(r1, t);
2102 c.u.s32.a = tcg_temp_new_i32();
2103 c.u.s32.b = tcg_const_i32(0);
2104 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
2105 tcg_temp_free_i64(t);
2107 return help_branch(s, &c, is_imm, imm, o->in2);
2110 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
2112 int r1 = get_field(s->fields, r1);
2113 bool is_imm = have_field(s->fields, i2);
2114 int imm = is_imm ? get_field(s->fields, i2) : 0;
2115 DisasCompare c;
2117 c.cond = TCG_COND_NE;
2118 c.is_64 = true;
2119 c.g1 = true;
2120 c.g2 = false;
2122 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
2123 c.u.s64.a = regs[r1];
2124 c.u.s64.b = tcg_const_i64(0);
2126 return help_branch(s, &c, is_imm, imm, o->in2);
2129 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
2131 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
2132 set_cc_static(s);
2133 return NO_EXIT;
2136 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
2138 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
2139 set_cc_static(s);
2140 return NO_EXIT;
2143 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
2145 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2146 set_cc_static(s);
2147 return NO_EXIT;
2150 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
2152 int l = get_field(s->fields, l1);
2153 TCGv_i32 vl;
2155 switch (l + 1) {
2156 case 1:
2157 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2158 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2159 break;
2160 case 2:
2161 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2162 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2163 break;
2164 case 4:
2165 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2166 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2167 break;
2168 case 8:
2169 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2170 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2171 break;
2172 default:
2173 potential_page_fault(s);
2174 vl = tcg_const_i32(l);
2175 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2176 tcg_temp_free_i32(vl);
2177 set_cc_static(s);
2178 return NO_EXIT;
2180 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2181 return NO_EXIT;
2184 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
2186 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2187 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2188 potential_page_fault(s);
2189 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
2190 tcg_temp_free_i32(r1);
2191 tcg_temp_free_i32(r3);
2192 set_cc_static(s);
2193 return NO_EXIT;
2196 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
2198 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2199 TCGv_i32 t1 = tcg_temp_new_i32();
2200 tcg_gen_trunc_i64_i32(t1, o->in1);
2201 potential_page_fault(s);
2202 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2203 set_cc_static(s);
2204 tcg_temp_free_i32(t1);
2205 tcg_temp_free_i32(m3);
2206 return NO_EXIT;
2209 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
2211 int r3 = get_field(s->fields, r3);
2212 potential_page_fault(s);
2213 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
2214 set_cc_static(s);
2215 return NO_EXIT;
2218 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
2220 int r3 = get_field(s->fields, r3);
2221 potential_page_fault(s);
2222 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
2223 set_cc_static(s);
2224 return NO_EXIT;
2227 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
2229 int r3 = get_field(s->fields, r3);
2230 TCGv_i64 in3 = tcg_temp_new_i64();
2231 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
2232 potential_page_fault(s);
2233 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
2234 tcg_temp_free_i64(in3);
2235 set_cc_static(s);
2236 return NO_EXIT;
2239 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
2241 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2242 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2243 potential_page_fault(s);
2244 /* XXX rewrite in tcg */
2245 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
2246 set_cc_static(s);
2247 return NO_EXIT;
2250 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2252 TCGv_i64 t1 = tcg_temp_new_i64();
2253 TCGv_i32 t2 = tcg_temp_new_i32();
2254 tcg_gen_trunc_i64_i32(t2, o->in1);
2255 gen_helper_cvd(t1, t2);
2256 tcg_temp_free_i32(t2);
2257 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2258 tcg_temp_free_i64(t1);
2259 return NO_EXIT;
2262 #ifndef CONFIG_USER_ONLY
2263 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2265 TCGv_i32 tmp;
2267 check_privileged(s);
2268 potential_page_fault(s);
2270 /* We pretend the format is RX_a so that D2 is the field we want. */
2271 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
2272 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
2273 tcg_temp_free_i32(tmp);
2274 return NO_EXIT;
2276 #endif
2278 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2280 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2281 return_low128(o->out);
2282 return NO_EXIT;
2285 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2287 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2288 return_low128(o->out);
2289 return NO_EXIT;
2292 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2294 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2295 return_low128(o->out);
2296 return NO_EXIT;
2299 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2301 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2302 return_low128(o->out);
2303 return NO_EXIT;
2306 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2308 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2309 return NO_EXIT;
2312 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2314 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2315 return NO_EXIT;
2318 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2320 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2321 return_low128(o->out2);
2322 return NO_EXIT;
2325 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2327 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2328 return NO_EXIT;
2331 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2333 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2334 tb->flags, (ab)use the tb->cs_base field as the address of
2335 the template in memory, and grab 8 bits of tb->flags/cflags for
2336 the contents of the register. We would then recognize all this
2337 in gen_intermediate_code_internal, generating code for exactly
2338 one instruction. This new TB then gets executed normally.
2340 On the other hand, this seems to be mostly used for modifying
2341 MVC inside of memcpy, which needs a helper call anyway. So
2342 perhaps this doesn't bear thinking about any further. */
2344 TCGv_i64 tmp;
2346 update_psw_addr(s);
2347 gen_op_calc_cc(s);
2349 tmp = tcg_const_i64(s->next_pc);
2350 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2351 tcg_temp_free_i64(tmp);
2353 set_cc_static(s);
2354 return NO_EXIT;
2357 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2359 int m3 = get_field(s->fields, m3);
2360 int pos, len, base = s->insn->data;
2361 TCGv_i64 tmp = tcg_temp_new_i64();
2362 uint64_t ccm;
2364 switch (m3) {
2365 case 0xf:
2366 /* Effectively a 32-bit load. */
2367 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2368 len = 32;
2369 goto one_insert;
2371 case 0xc:
2372 case 0x6:
2373 case 0x3:
2374 /* Effectively a 16-bit load. */
2375 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2376 len = 16;
2377 goto one_insert;
2379 case 0x8:
2380 case 0x4:
2381 case 0x2:
2382 case 0x1:
2383 /* Effectively an 8-bit load. */
2384 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2385 len = 8;
2386 goto one_insert;
2388 one_insert:
2389 pos = base + ctz32(m3) * 8;
2390 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2391 ccm = ((1ull << len) - 1) << pos;
2392 break;
2394 default:
2395 /* This is going to be a sequence of loads and inserts. */
2396 pos = base + 32 - 8;
2397 ccm = 0;
2398 while (m3) {
2399 if (m3 & 0x8) {
2400 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2401 tcg_gen_addi_i64(o->in2, o->in2, 1);
2402 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2403 ccm |= 0xff << pos;
2405 m3 = (m3 << 1) & 0xf;
2406 pos -= 8;
2408 break;
2411 tcg_gen_movi_i64(tmp, ccm);
2412 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2413 tcg_temp_free_i64(tmp);
2414 return NO_EXIT;
2417 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2419 int shift = s->insn->data & 0xff;
2420 int size = s->insn->data >> 8;
2421 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2422 return NO_EXIT;
2425 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2427 gen_helper_ldeb(o->out, cpu_env, o->in2);
2428 return NO_EXIT;
2431 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2433 gen_helper_ledb(o->out, cpu_env, o->in2);
2434 return NO_EXIT;
2437 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2439 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2440 return NO_EXIT;
2443 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2445 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2446 return NO_EXIT;
2449 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2451 gen_helper_lxdb(o->out, cpu_env, o->in2);
2452 return_low128(o->out2);
2453 return NO_EXIT;
2456 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2458 gen_helper_lxeb(o->out, cpu_env, o->in2);
2459 return_low128(o->out2);
2460 return NO_EXIT;
2463 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2465 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2466 return NO_EXIT;
2469 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2471 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2472 return NO_EXIT;
2475 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2477 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2478 return NO_EXIT;
2481 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2483 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2484 return NO_EXIT;
2487 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2489 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2490 return NO_EXIT;
2493 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2495 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2496 return NO_EXIT;
2499 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2501 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2502 return NO_EXIT;
2505 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2507 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2508 return NO_EXIT;
2511 #ifndef CONFIG_USER_ONLY
2512 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2514 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2515 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2516 check_privileged(s);
2517 potential_page_fault(s);
2518 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2519 tcg_temp_free_i32(r1);
2520 tcg_temp_free_i32(r3);
2521 return NO_EXIT;
2524 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2526 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2527 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2528 check_privileged(s);
2529 potential_page_fault(s);
2530 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2531 tcg_temp_free_i32(r1);
2532 tcg_temp_free_i32(r3);
2533 return NO_EXIT;
2535 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2537 check_privileged(s);
2538 potential_page_fault(s);
2539 gen_helper_lra(o->out, cpu_env, o->in2);
2540 set_cc_static(s);
2541 return NO_EXIT;
2544 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2546 TCGv_i64 t1, t2;
2548 check_privileged(s);
2550 t1 = tcg_temp_new_i64();
2551 t2 = tcg_temp_new_i64();
2552 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2553 tcg_gen_addi_i64(o->in2, o->in2, 4);
2554 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2555 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2556 tcg_gen_shli_i64(t1, t1, 32);
2557 gen_helper_load_psw(cpu_env, t1, t2);
2558 tcg_temp_free_i64(t1);
2559 tcg_temp_free_i64(t2);
2560 return EXIT_NORETURN;
2562 #endif
2564 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2566 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2567 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2568 potential_page_fault(s);
2569 gen_helper_lam(cpu_env, r1, o->in2, r3);
2570 tcg_temp_free_i32(r1);
2571 tcg_temp_free_i32(r3);
2572 return NO_EXIT;
2575 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2577 int r1 = get_field(s->fields, r1);
2578 int r3 = get_field(s->fields, r3);
2579 TCGv_i64 t = tcg_temp_new_i64();
2580 TCGv_i64 t4 = tcg_const_i64(4);
2582 while (1) {
2583 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2584 store_reg32_i64(r1, t);
2585 if (r1 == r3) {
2586 break;
2588 tcg_gen_add_i64(o->in2, o->in2, t4);
2589 r1 = (r1 + 1) & 15;
2592 tcg_temp_free_i64(t);
2593 tcg_temp_free_i64(t4);
2594 return NO_EXIT;
2597 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2599 int r1 = get_field(s->fields, r1);
2600 int r3 = get_field(s->fields, r3);
2601 TCGv_i64 t = tcg_temp_new_i64();
2602 TCGv_i64 t4 = tcg_const_i64(4);
2604 while (1) {
2605 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2606 store_reg32h_i64(r1, t);
2607 if (r1 == r3) {
2608 break;
2610 tcg_gen_add_i64(o->in2, o->in2, t4);
2611 r1 = (r1 + 1) & 15;
2614 tcg_temp_free_i64(t);
2615 tcg_temp_free_i64(t4);
2616 return NO_EXIT;
2619 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2621 int r1 = get_field(s->fields, r1);
2622 int r3 = get_field(s->fields, r3);
2623 TCGv_i64 t8 = tcg_const_i64(8);
2625 while (1) {
2626 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2627 if (r1 == r3) {
2628 break;
2630 tcg_gen_add_i64(o->in2, o->in2, t8);
2631 r1 = (r1 + 1) & 15;
2634 tcg_temp_free_i64(t8);
2635 return NO_EXIT;
2638 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2640 o->out = o->in2;
2641 o->g_out = o->g_in2;
2642 TCGV_UNUSED_I64(o->in2);
2643 o->g_in2 = false;
2644 return NO_EXIT;
2647 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2649 o->out = o->in1;
2650 o->out2 = o->in2;
2651 o->g_out = o->g_in1;
2652 o->g_out2 = o->g_in2;
2653 TCGV_UNUSED_I64(o->in1);
2654 TCGV_UNUSED_I64(o->in2);
2655 o->g_in1 = o->g_in2 = false;
2656 return NO_EXIT;
2659 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2661 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2662 potential_page_fault(s);
2663 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2664 tcg_temp_free_i32(l);
2665 return NO_EXIT;
2668 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2670 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2671 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2672 potential_page_fault(s);
2673 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2674 tcg_temp_free_i32(r1);
2675 tcg_temp_free_i32(r2);
2676 set_cc_static(s);
2677 return NO_EXIT;
2680 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2682 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2683 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2684 potential_page_fault(s);
2685 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2686 tcg_temp_free_i32(r1);
2687 tcg_temp_free_i32(r3);
2688 set_cc_static(s);
2689 return NO_EXIT;
2692 #ifndef CONFIG_USER_ONLY
2693 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2695 int r1 = get_field(s->fields, l1);
2696 check_privileged(s);
2697 potential_page_fault(s);
2698 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2699 set_cc_static(s);
2700 return NO_EXIT;
2703 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2705 int r1 = get_field(s->fields, l1);
2706 check_privileged(s);
2707 potential_page_fault(s);
2708 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2709 set_cc_static(s);
2710 return NO_EXIT;
2712 #endif
2714 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2716 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2717 return NO_EXIT;
2720 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2722 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2723 return_low128(o->out2);
2724 return NO_EXIT;
2727 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2729 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2730 return NO_EXIT;
2733 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2735 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2736 return NO_EXIT;
2739 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2741 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2742 return NO_EXIT;
2745 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2747 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2748 return_low128(o->out2);
2749 return NO_EXIT;
2752 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2754 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2755 return_low128(o->out2);
2756 return NO_EXIT;
2759 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2761 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2762 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2763 tcg_temp_free_i64(r3);
2764 return NO_EXIT;
2767 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2769 int r3 = get_field(s->fields, r3);
2770 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2771 return NO_EXIT;
2774 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2776 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2777 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2778 tcg_temp_free_i64(r3);
2779 return NO_EXIT;
2782 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2784 int r3 = get_field(s->fields, r3);
2785 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2786 return NO_EXIT;
2789 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2791 gen_helper_nabs_i64(o->out, o->in2);
2792 return NO_EXIT;
2795 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2797 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2798 return NO_EXIT;
2801 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2803 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2804 return NO_EXIT;
2807 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2809 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2810 tcg_gen_mov_i64(o->out2, o->in2);
2811 return NO_EXIT;
2814 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2816 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2817 potential_page_fault(s);
2818 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2819 tcg_temp_free_i32(l);
2820 set_cc_static(s);
2821 return NO_EXIT;
2824 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2826 tcg_gen_neg_i64(o->out, o->in2);
2827 return NO_EXIT;
2830 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2832 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2833 return NO_EXIT;
2836 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2838 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2839 return NO_EXIT;
2842 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2844 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2845 tcg_gen_mov_i64(o->out2, o->in2);
2846 return NO_EXIT;
2849 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2851 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2852 potential_page_fault(s);
2853 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2854 tcg_temp_free_i32(l);
2855 set_cc_static(s);
2856 return NO_EXIT;
2859 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2861 tcg_gen_or_i64(o->out, o->in1, o->in2);
2862 return NO_EXIT;
2865 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2867 int shift = s->insn->data & 0xff;
2868 int size = s->insn->data >> 8;
2869 uint64_t mask = ((1ull << size) - 1) << shift;
2871 assert(!o->g_in2);
2872 tcg_gen_shli_i64(o->in2, o->in2, shift);
2873 tcg_gen_or_i64(o->out, o->in1, o->in2);
2875 /* Produce the CC from only the bits manipulated. */
2876 tcg_gen_andi_i64(cc_dst, o->out, mask);
2877 set_cc_nz_u64(s, cc_dst);
2878 return NO_EXIT;
2881 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2883 tcg_gen_bswap16_i64(o->out, o->in2);
2884 return NO_EXIT;
2887 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2889 tcg_gen_bswap32_i64(o->out, o->in2);
2890 return NO_EXIT;
2893 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2895 tcg_gen_bswap64_i64(o->out, o->in2);
2896 return NO_EXIT;
2899 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2901 TCGv_i32 t1 = tcg_temp_new_i32();
2902 TCGv_i32 t2 = tcg_temp_new_i32();
2903 TCGv_i32 to = tcg_temp_new_i32();
2904 tcg_gen_trunc_i64_i32(t1, o->in1);
2905 tcg_gen_trunc_i64_i32(t2, o->in2);
2906 tcg_gen_rotl_i32(to, t1, t2);
2907 tcg_gen_extu_i32_i64(o->out, to);
2908 tcg_temp_free_i32(t1);
2909 tcg_temp_free_i32(t2);
2910 tcg_temp_free_i32(to);
2911 return NO_EXIT;
2914 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2916 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2917 return NO_EXIT;
2920 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2922 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2923 return NO_EXIT;
2926 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2928 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2929 return NO_EXIT;
2932 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2934 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2935 return_low128(o->out2);
2936 return NO_EXIT;
2939 #ifndef CONFIG_USER_ONLY
2940 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2942 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2943 check_privileged(s);
2944 potential_page_fault(s);
2945 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2946 tcg_temp_free_i32(r1);
2947 return NO_EXIT;
2949 #endif
2951 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2953 uint64_t sign = 1ull << s->insn->data;
2954 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2955 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2956 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2957 /* The arithmetic left shift is curious in that it does not affect
2958 the sign bit. Copy that over from the source unchanged. */
2959 tcg_gen_andi_i64(o->out, o->out, ~sign);
2960 tcg_gen_andi_i64(o->in1, o->in1, sign);
2961 tcg_gen_or_i64(o->out, o->out, o->in1);
2962 return NO_EXIT;
2965 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2967 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2968 return NO_EXIT;
2971 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2973 tcg_gen_sar_i64(o->out, o->in1, o->in2);
2974 return NO_EXIT;
2977 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2979 tcg_gen_shr_i64(o->out, o->in1, o->in2);
2980 return NO_EXIT;
2983 #ifndef CONFIG_USER_ONLY
2984 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
2986 check_privileged(s);
2987 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
2988 return NO_EXIT;
2991 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
2993 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2994 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2995 check_privileged(s);
2996 potential_page_fault(s);
2997 gen_helper_stctg(cpu_env, r1, o->in2, r3);
2998 tcg_temp_free_i32(r1);
2999 tcg_temp_free_i32(r3);
3000 return NO_EXIT;
3003 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3005 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3006 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3007 check_privileged(s);
3008 potential_page_fault(s);
3009 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3010 tcg_temp_free_i32(r1);
3011 tcg_temp_free_i32(r3);
3012 return NO_EXIT;
3015 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3017 uint64_t i2 = get_field(s->fields, i2);
3018 TCGv_i64 t;
3020 check_privileged(s);
3022 /* It is important to do what the instruction name says: STORE THEN.
3023 If we let the output hook perform the store then if we fault and
3024 restart, we'll have the wrong SYSTEM MASK in place. */
3025 t = tcg_temp_new_i64();
3026 tcg_gen_shri_i64(t, psw_mask, 56);
3027 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3028 tcg_temp_free_i64(t);
3030 if (s->fields->op == 0xac) {
3031 tcg_gen_andi_i64(psw_mask, psw_mask,
3032 (i2 << 56) | 0x00ffffffffffffffull);
3033 } else {
3034 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3036 return NO_EXIT;
3038 #endif
3040 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3042 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3043 return NO_EXIT;
3046 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3048 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3049 return NO_EXIT;
3052 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3054 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3055 return NO_EXIT;
3058 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3060 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3061 return NO_EXIT;
3064 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3066 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3067 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3068 potential_page_fault(s);
3069 gen_helper_stam(cpu_env, r1, o->in2, r3);
3070 tcg_temp_free_i32(r1);
3071 tcg_temp_free_i32(r3);
3072 return NO_EXIT;
3075 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3077 int m3 = get_field(s->fields, m3);
3078 int pos, base = s->insn->data;
3079 TCGv_i64 tmp = tcg_temp_new_i64();
3081 pos = base + ctz32(m3) * 8;
3082 switch (m3) {
3083 case 0xf:
3084 /* Effectively a 32-bit store. */
3085 tcg_gen_shri_i64(tmp, o->in1, pos);
3086 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3087 break;
3089 case 0xc:
3090 case 0x6:
3091 case 0x3:
3092 /* Effectively a 16-bit store. */
3093 tcg_gen_shri_i64(tmp, o->in1, pos);
3094 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3095 break;
3097 case 0x8:
3098 case 0x4:
3099 case 0x2:
3100 case 0x1:
3101 /* Effectively an 8-bit store. */
3102 tcg_gen_shri_i64(tmp, o->in1, pos);
3103 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3104 break;
3106 default:
3107 /* This is going to be a sequence of shifts and stores. */
3108 pos = base + 32 - 8;
3109 while (m3) {
3110 if (m3 & 0x8) {
3111 tcg_gen_shri_i64(tmp, o->in1, pos);
3112 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3113 tcg_gen_addi_i64(o->in2, o->in2, 1);
3115 m3 = (m3 << 1) & 0xf;
3116 pos -= 8;
3118 break;
3120 tcg_temp_free_i64(tmp);
3121 return NO_EXIT;
3124 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3126 int r1 = get_field(s->fields, r1);
3127 int r3 = get_field(s->fields, r3);
3128 int size = s->insn->data;
3129 TCGv_i64 tsize = tcg_const_i64(size);
3131 while (1) {
3132 if (size == 8) {
3133 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3134 } else {
3135 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3137 if (r1 == r3) {
3138 break;
3140 tcg_gen_add_i64(o->in2, o->in2, tsize);
3141 r1 = (r1 + 1) & 15;
3144 tcg_temp_free_i64(tsize);
3145 return NO_EXIT;
3148 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3150 int r1 = get_field(s->fields, r1);
3151 int r3 = get_field(s->fields, r3);
3152 TCGv_i64 t = tcg_temp_new_i64();
3153 TCGv_i64 t4 = tcg_const_i64(4);
3154 TCGv_i64 t32 = tcg_const_i64(32);
3156 while (1) {
3157 tcg_gen_shl_i64(t, regs[r1], t32);
3158 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3159 if (r1 == r3) {
3160 break;
3162 tcg_gen_add_i64(o->in2, o->in2, t4);
3163 r1 = (r1 + 1) & 15;
3166 tcg_temp_free_i64(t);
3167 tcg_temp_free_i64(t4);
3168 tcg_temp_free_i64(t32);
3169 return NO_EXIT;
3172 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3174 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3175 return NO_EXIT;
3178 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3180 TCGv_i64 cc;
3182 assert(!o->g_in2);
3183 tcg_gen_not_i64(o->in2, o->in2);
3184 tcg_gen_add_i64(o->out, o->in1, o->in2);
3186 /* XXX possible optimization point */
3187 gen_op_calc_cc(s);
3188 cc = tcg_temp_new_i64();
3189 tcg_gen_extu_i32_i64(cc, cc_op);
3190 tcg_gen_shri_i64(cc, cc, 1);
3191 tcg_gen_add_i64(o->out, o->out, cc);
3192 tcg_temp_free_i64(cc);
3193 return NO_EXIT;
3196 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3198 TCGv_i32 t;
3200 update_psw_addr(s);
3201 gen_op_calc_cc(s);
3203 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3204 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3205 tcg_temp_free_i32(t);
3207 t = tcg_const_i32(s->next_pc - s->pc);
3208 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3209 tcg_temp_free_i32(t);
3211 gen_exception(EXCP_SVC);
3212 return EXIT_NORETURN;
3215 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3217 gen_helper_tceb(cc_op, o->in1, o->in2);
3218 set_cc_static(s);
3219 return NO_EXIT;
3222 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3224 gen_helper_tcdb(cc_op, o->in1, o->in2);
3225 set_cc_static(s);
3226 return NO_EXIT;
3229 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3231 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3232 set_cc_static(s);
3233 return NO_EXIT;
3236 #ifndef CONFIG_USER_ONLY
3237 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3239 potential_page_fault(s);
3240 gen_helper_tprot(cc_op, o->addr1, o->in2);
3241 set_cc_static(s);
3242 return NO_EXIT;
3244 #endif
3246 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3248 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3249 potential_page_fault(s);
3250 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3251 tcg_temp_free_i32(l);
3252 set_cc_static(s);
3253 return NO_EXIT;
3256 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3258 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3259 potential_page_fault(s);
3260 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3261 tcg_temp_free_i32(l);
3262 return NO_EXIT;
3265 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3267 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3268 potential_page_fault(s);
3269 gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
3270 tcg_temp_free_i32(l);
3271 set_cc_static(s);
3272 return NO_EXIT;
3275 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3277 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3278 return NO_EXIT;
3281 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3283 int shift = s->insn->data & 0xff;
3284 int size = s->insn->data >> 8;
3285 uint64_t mask = ((1ull << size) - 1) << shift;
3287 assert(!o->g_in2);
3288 tcg_gen_shli_i64(o->in2, o->in2, shift);
3289 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3291 /* Produce the CC from only the bits manipulated. */
3292 tcg_gen_andi_i64(cc_dst, o->out, mask);
3293 set_cc_nz_u64(s, cc_dst);
3294 return NO_EXIT;
3297 /* ====================================================================== */
3298 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3299 the original inputs), update the various cc data structures in order to
3300 be able to compute the new condition code. */
3302 static void cout_abs32(DisasContext *s, DisasOps *o)
3304 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3307 static void cout_abs64(DisasContext *s, DisasOps *o)
3309 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3312 static void cout_adds32(DisasContext *s, DisasOps *o)
3314 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3317 static void cout_adds64(DisasContext *s, DisasOps *o)
3319 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3322 static void cout_addu32(DisasContext *s, DisasOps *o)
3324 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3327 static void cout_addu64(DisasContext *s, DisasOps *o)
3329 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3332 static void cout_addc32(DisasContext *s, DisasOps *o)
3334 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3337 static void cout_addc64(DisasContext *s, DisasOps *o)
3339 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3342 static void cout_cmps32(DisasContext *s, DisasOps *o)
3344 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3347 static void cout_cmps64(DisasContext *s, DisasOps *o)
3349 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3352 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3354 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3357 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3359 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3362 static void cout_f32(DisasContext *s, DisasOps *o)
3364 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3367 static void cout_f64(DisasContext *s, DisasOps *o)
3369 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3372 static void cout_f128(DisasContext *s, DisasOps *o)
3374 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3377 static void cout_nabs32(DisasContext *s, DisasOps *o)
3379 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3382 static void cout_nabs64(DisasContext *s, DisasOps *o)
3384 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3387 static void cout_neg32(DisasContext *s, DisasOps *o)
3389 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3392 static void cout_neg64(DisasContext *s, DisasOps *o)
3394 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3397 static void cout_nz32(DisasContext *s, DisasOps *o)
3399 tcg_gen_ext32u_i64(cc_dst, o->out);
3400 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3403 static void cout_nz64(DisasContext *s, DisasOps *o)
3405 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3408 static void cout_s32(DisasContext *s, DisasOps *o)
3410 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3413 static void cout_s64(DisasContext *s, DisasOps *o)
3415 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3418 static void cout_subs32(DisasContext *s, DisasOps *o)
3420 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3423 static void cout_subs64(DisasContext *s, DisasOps *o)
3425 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3428 static void cout_subu32(DisasContext *s, DisasOps *o)
3430 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3433 static void cout_subu64(DisasContext *s, DisasOps *o)
3435 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3438 static void cout_subb32(DisasContext *s, DisasOps *o)
3440 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3443 static void cout_subb64(DisasContext *s, DisasOps *o)
3445 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3448 static void cout_tm32(DisasContext *s, DisasOps *o)
3450 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3453 static void cout_tm64(DisasContext *s, DisasOps *o)
3455 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3458 /* ====================================================================== */
3459 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3460 with the TCG register to which we will write. Used in combination with
3461 the "wout" generators, in some cases we need a new temporary, and in
3462 some cases we can write to a TCG global. */
3464 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3466 o->out = tcg_temp_new_i64();
3469 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3471 o->out = tcg_temp_new_i64();
3472 o->out2 = tcg_temp_new_i64();
3475 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3477 o->out = regs[get_field(f, r1)];
3478 o->g_out = true;
3481 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3483 /* ??? Specification exception: r1 must be even. */
3484 int r1 = get_field(f, r1);
3485 o->out = regs[r1];
3486 o->out2 = regs[(r1 + 1) & 15];
3487 o->g_out = o->g_out2 = true;
3490 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3492 o->out = fregs[get_field(f, r1)];
3493 o->g_out = true;
3496 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3498 /* ??? Specification exception: r1 must be < 14. */
3499 int r1 = get_field(f, r1);
3500 o->out = fregs[r1];
3501 o->out2 = fregs[(r1 + 2) & 15];
3502 o->g_out = o->g_out2 = true;
3505 /* ====================================================================== */
3506 /* The "Write OUTput" generators. These generally perform some non-trivial
3507 copy of data to TCG globals, or to main memory. The trivial cases are
3508 generally handled by having a "prep" generator install the TCG global
3509 as the destination of the operation. */
3511 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3513 store_reg(get_field(f, r1), o->out);
3516 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3518 int r1 = get_field(f, r1);
3519 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3522 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3524 int r1 = get_field(f, r1);
3525 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3528 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3530 store_reg32_i64(get_field(f, r1), o->out);
3533 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3535 /* ??? Specification exception: r1 must be even. */
3536 int r1 = get_field(f, r1);
3537 store_reg32_i64(r1, o->out);
3538 store_reg32_i64((r1 + 1) & 15, o->out2);
3541 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3543 /* ??? Specification exception: r1 must be even. */
3544 int r1 = get_field(f, r1);
3545 store_reg32_i64((r1 + 1) & 15, o->out);
3546 tcg_gen_shri_i64(o->out, o->out, 32);
3547 store_reg32_i64(r1, o->out);
3550 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3552 store_freg32_i64(get_field(f, r1), o->out);
3555 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3557 store_freg(get_field(f, r1), o->out);
3560 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3562 /* ??? Specification exception: r1 must be < 14. */
3563 int f1 = get_field(s->fields, r1);
3564 store_freg(f1, o->out);
3565 store_freg((f1 + 2) & 15, o->out2);
3568 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3570 if (get_field(f, r1) != get_field(f, r2)) {
3571 store_reg32_i64(get_field(f, r1), o->out);
3575 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3577 if (get_field(f, r1) != get_field(f, r2)) {
3578 store_freg32_i64(get_field(f, r1), o->out);
3582 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3584 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3587 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3589 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3592 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3594 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3597 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3599 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3602 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3604 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */

/* in1 = fresh 64-bit copy of general register r1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* in1 aliases the global TCGv for r1; g_in1 keeps it from being freed.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* in1 = low 32 bits of r1, sign-extended to 64 bits.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* in1 = low 32 bits of r1, zero-extended to 64 bits.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* in1 = high 32 bits of r1, shifted down to bit 0.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

/* in1 = the odd register of the even/odd pair starting at r1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* As in1_r1p1, but sign-extend the low 32 bits.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* As in1_r1p1, but zero-extend the low 32 bits.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* in1 = 64-bit doubleword built from the r1 (high half) / r1+1 (low
   half) register pair.  NOTE(review): r1 + 1 is not masked with 15
   here, unlike the helpers above — relies on r1 being even.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}

/* in1 = fresh 64-bit copy of general register r2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* in1 = fresh 64-bit copy of general register r3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* in1 aliases the global TCGv for r3.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* in1 = low 32 bits of r3, sign-extended.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* in1 = low 32 bits of r3, zero-extended.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* in1 = short (32-bit) float operand from float register r1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* in1 aliases the global TCGv for float register r1.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Extended (128-bit) float operand in the freg pair r1 / r1+2.  Note
   this fills out/out2 rather than in1: the result is written back to
   the same pair in place.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}

/* addr1 = effective address computed from the b1/d1 fields.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* addr1 = effective address from x2/b2/d2; x2 is optional per format.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* in1 = zero-extended byte loaded from the first-operand address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = sign-extended halfword from the first-operand address.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = zero-extended halfword from the first-operand address.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = sign-extended word from the first-operand address.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = zero-extended word from the first-operand address.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = doubleword loaded from the first-operand address.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

/* in2 aliases the global TCGv for r1; g_in2 keeps it from being freed.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

/* in2 = low 16 bits of r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

/* in2 = low 32 bits of r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

/* in2 = fresh 64-bit copy of general register r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* in2 aliases the global TCGv for r2.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* in2 = copy of r2, but only when r2 != 0; otherwise in2 is left
   unset (r0 means "no operand" for the insns using this helper).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* in2 = low 8 bits of r2, sign-extended.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = low 8 bits of r2, zero-extended.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = low 16 bits of r2, sign-extended.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = low 16 bits of r2, zero-extended.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = fresh 64-bit copy of general register r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* in2 = low 32 bits of r2, sign-extended.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = low 32 bits of r2, zero-extended.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = short (32-bit) float operand from float register r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* in2 aliases the global TCGv for float register r2.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Extended (128-bit) float operand in the freg pair r2 / r2+2,
   aliased into in1/in2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}

/* in2 = effective address from x2/b2/d2 (the address itself, no load).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* in2 = PC-relative address: current pc + i2 halfwords.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* in2 = shift amount for a 32-bit shift (masked to 0..31).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

/* in2 = shift amount for a 64-bit shift (masked to 0..63).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}

/* The in2_m2_* helpers compute the x2/b2/d2 address and then load from
   it, reusing the in2 temp as both address and destination.  */

/* in2 = zero-extended byte from the second-operand address.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

/* in2 = sign-extended halfword from the second-operand address.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

/* in2 = zero-extended halfword from the second-operand address.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* in2 = sign-extended word from the second-operand address.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* in2 = zero-extended word from the second-operand address.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* in2 = doubleword from the second-operand address.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* The in2_mri2_* helpers are the same, but from a PC-relative address.  */

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* in2 = the i2 immediate as extracted (already signed where the field
   definition says so).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* in2 = low 8 bits of the i2 immediate, zero-extended.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* in2 = low 16 bits of the i2 immediate, zero-extended.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* in2 = low 32 bits of the i2 immediate, zero-extended.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* in2 = 16-bit immediate shifted left by the per-insn data amount
   (e.g. for the INSERT IMMEDIATE family).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* in2 = 32-bit immediate shifted left by the per-insn data amount.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.

   insn-data.def is included three times below, with the D() macro
   redefined each time, to generate (a) the enum of insn indexes,
   (b) the insn_info[] descriptor table, and (c) the opcode switch.  */

/* C() is D() with the per-insn data operand defaulted to 0.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion: each table entry becomes an enumerator insn_<NM>.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: each entry becomes a DisasInsn initializer.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {   \
    .opc = OPC,                                         \
    .fmt = FMT_##FT,                                    \
    .fac = FAC_##FC,                                    \
    .name = #NM,                                        \
    .help_in1 = in1_##I1,                               \
    .help_in2 = in2_##I2,                               \
    .help_prep = prep_##P,                              \
    .help_wout = wout_##W,                              \
    .help_cout = cout_##CC,                             \
    .help_op = op_##OP,                                 \
    .data = D                                           \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: each entry becomes a switch case mapping its 16-bit
   opcode to the corresponding insn_info element.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Return the descriptor for the combined (op << 8 | op2) opcode,
   or NULL if the opcode is not in the table.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4044 /* Extract a field from the insn. The INSN should be left-aligned in
4045 the uint64_t so that we can more easily utilize the big-bit-endian
4046 definitions we extract from the Principals of Operation. */
4048 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4050 uint32_t r, m;
4052 if (f->size == 0) {
4053 return;
4056 /* Zero extract the field from the insn. */
4057 r = (insn << f->beg) >> (64 - f->size);
4059 /* Sign-extend, or un-swap the field as necessary. */
4060 switch (f->type) {
4061 case 0: /* unsigned */
4062 break;
4063 case 1: /* signed */
4064 assert(f->size <= 32);
4065 m = 1u << (f->size - 1);
4066 r = (r ^ m) - m;
4067 break;
4068 case 2: /* dl+dh split, signed 20 bit. */
4069 r = ((int8_t)r << 12) | (r >> 8);
4070 break;
4071 default:
4072 abort();
4075 /* Validate that the "compressed" encoding we selected above is valid.
4076 I.e. we havn't make two different original fields overlap. */
4077 assert(((o->presentC >> f->indexC) & 1) == 0);
4078 o->presentC |= 1 << f->indexC;
4079 o->presentO |= 1 << f->indexO;
4081 o->c[f->indexC] = r;
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes determine the length of the insn.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the 64-bit word, re-reading memory as
       needed for 4- and 6-byte insns.  NOTE(review): assumes ld_code4
       returns a 64-bit value, so the << 32 does not truncate — confirm
       against its declaration.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        /* Default: secondary opcode at bit 40 (byte 5).  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Translate one instruction at s->pc: decode it, run the per-insn
   generator callbacks in fixed order (in1, in2, prep, op, wout, cout),
   free the operand temporaries, and advance the PC.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC.  */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy is_jmp disposition onto ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    /* Mark all operand temps unused so the cleanup below can tell
       which ones the helpers actually allocated.  */
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each callback is optional; the order
       (inputs, prep, op, writeback, cc) is part of the contract.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       operands that alias global TCG variables and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
/* Translate a block of guest insns starting at tb->pc into TCG ops,
   stopping at a page boundary, op-buffer exhaustion, insn-count limit,
   single-step, or an insn that ends the TB.  With search_pc set,
   per-op PC/cc_op metadata is also recorded for fault restoration.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record PC/cc_op for each generated op, zero-filling any
               ops emitted since the last insn boundary.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this PC ends the TB before translating it.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue appropriate to how translation ended.  */
    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The exit was already generated by the insn itself.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill metadata for any trailing epilogue ops.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
/* Normal translation entry point: no per-op PC metadata recorded.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

/* Translation with per-op PC metadata, used when restoring CPU state
   after a fault inside an already-translated TB.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4407 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4409 int cc_op;
4410 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4411 cc_op = gen_opc_cc_op[pc_pos];
4412 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4413 env->cc_op = cc_op;