target-s390x: PER successful-branching event support
target-s390x/translate.c (qemu/ar7.git)
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

/* global register indexes */
static TCGv_ptr cpu_env;

#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t pc, next_pc;
    enum cc_op cc_op;
    bool singlestep_enabled;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#define DISAS_EXCP 4

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }

    return pc;
}

void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
                    env->vregs[i][0].ll, env->vregs[i][1].ll);
        cpu_fprintf(f, (i % 2) ? " " : "\n");
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
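
/* PER (Program Event Recording): when FLAG_MASK_PER is set in tb->flags,
   every taken branch must be reported to the per_branch helper so that a
   successful-branching event can be raised when the control registers ask
   for one.  per_branch() handles unconditional branches (to_next says
   whether the "branch" merely falls through to the next instruction);
   per_branch_cond() guards the helper call with the branch condition.  */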
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, pc, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        TCGv_i64 pc = tcg_const_i64(s->pc);
        gen_helper_per_branch(cpu_env, pc, psw_addr);
        tcg_temp_free_i64(pc);

        gen_set_label(lab);
    }
#endif
}
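
/* Note the inverted condition in per_branch_cond(): the generated code
   jumps over the helper call when the branch is NOT taken, so the helper
   (and thus a potential PER event) runs only on the taken path, after
   psw_addr has already been updated with the branch target.  */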
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}

#ifndef CONFIG_USER_ONLY
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
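
/* Illustrative example (operand decode itself happens in the format tables
   below): L %r1,8(%r2,%r3) yields x2=2, b2=3, d2=8, so the code above emits
   tmp = r3 + r2; tmp += 8; and in 31-bit mode the result would then be
   masked to 0x7fffffff.  */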
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
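
/* The condition code is computed lazily: instructions record the operation
   (s->cc_op) and its inputs (cc_src/cc_dst/cc_vr), and only gen_op_calc_cc()
   below folds them into the architectural 0..3 value, via the calc_cc
   helper with 1, 2 or 3 arguments depending on the operation.  */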
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO)
            && !(s->tb->flags & FLAG_MASK_PER));
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
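
/* Worked example: the four mask bits select CCs 0..3, msb first (bit 8 =
   CC0, 4 = CC1, 2 = CC2, 1 = CC3).  After a comparison, CC0/CC1/CC2 mean
   EQ/LT/GT, so e.g. BC 8,... tests "equal" (TCG_COND_EQ via ltgt_cond[8])
   and BC 7,... tests "not equal" (TCG_COND_NE via ltgt_cond[7], since CC3
   cannot occur after a compare).  */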
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
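
/* A DisasCompare produced above may alias global TCG values (cc_op, cc_src,
   cc_dst, cc_vr, or regs[]); in that case g1/g2 are set and free_compare()
   must not release them.  Only freshly allocated temporaries are freed.  */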
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
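
/* As a (hypothetical) illustration of the scheme below: an entry such as
   F2(RR_a, R(1, 8), R(2, 12)) in insn-format.def would expand here to the
   enumerator FMT_RR_a, and further down to a format_info[] row describing
   two 4-bit register fields at bit offsets 8 and 12.  */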
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
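
/* Typical use, as seen throughout the ops below:
       int r1 = get_field(s->fields, r1);
   which checks via presentO that the r1 field exists in this format and
   then reads it from the compact slot FLD_C_r1.  */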
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;

struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    DisasFacility fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
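
/* The translator drives each instruction through these hooks in order:
   in1 and in2 load the inputs, prep sets up the outputs, op performs the
   operation (and decides the TB exit status), wout writes the results
   back, and cout computes the condition code.  Any hook may be NULL.  */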
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
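
/* Even a branch to the immediately following instruction counts as a
   successful branch for PER, hence the per_branch(s, true) call on the
   dest == s->next_pc fast path above; translation itself just continues
   in the same TB.  Note that use_goto_tb() refuses chaining whenever
   FLAG_MASK_PER is set, so the PER helper is never skipped.  */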
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
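
/* In the movcond path above, psw_addr is written unconditionally (picking
   dest or next), so PER reporting must be conditional: per_branch_cond()
   re-tests the same condition and only fires the helper on the taken arm.
   Since use_goto_tb() fails under FLAG_MASK_PER, this is also the path
   every conditional branch takes while PER is active.  */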
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}

static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
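
/* The BRANCH ON COUNT / BRANCH ON INDEX family below shares one pattern:
   decrement (or add the increment register to) R1, build a DisasCompare
   that branches while the result is non-zero (or compares against the
   limit register), and hand everything to help_branch() above.  */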
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
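
/* In the convert-to/from-fixed ops above and the FP rounding ops further
   below, the m3 field is passed through to the helper as the architectural
   rounding-mode modifier; interpreting it is left to the BFP helpers
   (presumably in fpu_helper.c).  */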
1826 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1828 int r2 = get_field(s->fields, r2);
1829 TCGv_i64 len = tcg_temp_new_i64();
1831 potential_page_fault(s);
1832 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1833 set_cc_static(s);
1834 return_low128(o->out);
1836 tcg_gen_add_i64(regs[r2], regs[r2], len);
1837 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1838 tcg_temp_free_i64(len);
1840 return NO_EXIT;
1843 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1845 int l = get_field(s->fields, l1);
1846 TCGv_i32 vl;
1848 switch (l + 1) {
1849 case 1:
1850 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1851 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1852 break;
1853 case 2:
1854 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1855 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1856 break;
1857 case 4:
1858 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1859 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1860 break;
1861 case 8:
1862 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1863 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1864 break;
1865 default:
1866 potential_page_fault(s);
1867 vl = tcg_const_i32(l);
1868 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1869 tcg_temp_free_i32(vl);
1870 set_cc_static(s);
1871 return NO_EXIT;
1873 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1874 return NO_EXIT;
1877 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1879 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1880 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1881 potential_page_fault(s);
1882 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1883 tcg_temp_free_i32(r1);
1884 tcg_temp_free_i32(r3);
1885 set_cc_static(s);
1886 return NO_EXIT;
1889 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1891 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1892 TCGv_i32 t1 = tcg_temp_new_i32();
1893 tcg_gen_trunc_i64_i32(t1, o->in1);
1894 potential_page_fault(s);
1895 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1896 set_cc_static(s);
1897 tcg_temp_free_i32(t1);
1898 tcg_temp_free_i32(m3);
1899 return NO_EXIT;
1902 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1904 potential_page_fault(s);
1905 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1906 set_cc_static(s);
1907 return_low128(o->in2);
1908 return NO_EXIT;
1911 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1913 TCGv_i64 t = tcg_temp_new_i64();
1914 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1915 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1916 tcg_gen_or_i64(o->out, o->out, t);
1917 tcg_temp_free_i64(t);
1918 return NO_EXIT;
1921 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1923 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1924 int d2 = get_field(s->fields, d2);
1925 int b2 = get_field(s->fields, b2);
1926 int is_64 = s->insn->data;
1927 TCGv_i64 addr, mem, cc, z;
1929 /* Note that in1 = R3 (new value) and
1930 in2 = (zero-extended) R1 (expected value). */
1932 /* Load the memory into the (temporary) output. While the PoO only talks
1933 about moving the memory to R1 on inequality, if we include equality it
1934 means that R1 is equal to the memory in all conditions. */
1935 addr = get_address(s, 0, b2, d2);
1936 if (is_64) {
1937 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1938 } else {
1939 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1942 /* Are the memory and expected values (un)equal? Note that this setcond
1943 produces the output CC value, thus the NE sense of the test. */
1944 cc = tcg_temp_new_i64();
1945 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1947 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1948 Recall that we are allowed to unconditionally issue the store (and
1949 thus any possible write trap), so (re-)store the original contents
1950 of MEM in case of inequality. */
1951 z = tcg_const_i64(0);
1952 mem = tcg_temp_new_i64();
1953 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1954 if (is_64) {
1955 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1956 } else {
1957 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1959 tcg_temp_free_i64(z);
1960 tcg_temp_free_i64(mem);
1961 tcg_temp_free_i64(addr);
1963 /* Store CC back to cc_op. Wait until after the store so that any
1964 exception gets the old cc_op value. */
1965 tcg_gen_trunc_i64_i32(cc_op, cc);
1966 tcg_temp_free_i64(cc);
1967 set_cc_static(s);
1968 return NO_EXIT;
1971 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1973 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1974 int r1 = get_field(s->fields, r1);
1975 int r3 = get_field(s->fields, r3);
1976 int d2 = get_field(s->fields, d2);
1977 int b2 = get_field(s->fields, b2);
1978 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1980 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1982 addrh = get_address(s, 0, b2, d2);
1983 addrl = get_address(s, 0, b2, d2 + 8);
1984 outh = tcg_temp_new_i64();
1985 outl = tcg_temp_new_i64();
1987 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
1988 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
1990 /* Fold the double-word compare with arithmetic. */
1991 cc = tcg_temp_new_i64();
1992 z = tcg_temp_new_i64();
1993 tcg_gen_xor_i64(cc, outh, regs[r1]);
1994 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
1995 tcg_gen_or_i64(cc, cc, z);
1996 tcg_gen_movi_i64(z, 0);
1997 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
1999 memh = tcg_temp_new_i64();
2000 meml = tcg_temp_new_i64();
2001 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
2002 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
2003 tcg_temp_free_i64(z);
2005 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
2006 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
2007 tcg_temp_free_i64(memh);
2008 tcg_temp_free_i64(meml);
2009 tcg_temp_free_i64(addrh);
2010 tcg_temp_free_i64(addrl);
2012 /* Save back state now that we've passed all exceptions. */
2013 tcg_gen_mov_i64(regs[r1], outh);
2014 tcg_gen_mov_i64(regs[r1 + 1], outl);
2015 tcg_gen_trunc_i64_i32(cc_op, cc);
2016 tcg_temp_free_i64(outh);
2017 tcg_temp_free_i64(outl);
2018 tcg_temp_free_i64(cc);
2019 set_cc_static(s);
2020 return NO_EXIT;
2023 #ifndef CONFIG_USER_ONLY
2024 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2026 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2027 check_privileged(s);
2028 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
2029 tcg_temp_free_i32(r1);
2030 set_cc_static(s);
2031 return NO_EXIT;
2033 #endif
2035 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2037 TCGv_i64 t1 = tcg_temp_new_i64();
2038 TCGv_i32 t2 = tcg_temp_new_i32();
2039 tcg_gen_trunc_i64_i32(t2, o->in1);
2040 gen_helper_cvd(t1, t2);
2041 tcg_temp_free_i32(t2);
2042 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2043 tcg_temp_free_i64(t1);
2044 return NO_EXIT;
2047 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2049 int m3 = get_field(s->fields, m3);
2050 TCGLabel *lab = gen_new_label();
2051 TCGCond c;
2053 c = tcg_invert_cond(ltgt_cond[m3]);
2054 if (s->insn->data) {
2055 c = tcg_unsigned_cond(c);
2057 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2059 /* Trap. */
2060 gen_trap(s);
2062 gen_set_label(lab);
2063 return NO_EXIT;
2066 #ifndef CONFIG_USER_ONLY
2067 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2069 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2070 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2071 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2073 check_privileged(s);
2074 update_psw_addr(s);
2075 gen_op_calc_cc(s);
2077 gen_helper_diag(cpu_env, r1, r3, func_code);
2079 tcg_temp_free_i32(func_code);
2080 tcg_temp_free_i32(r3);
2081 tcg_temp_free_i32(r1);
2082 return NO_EXIT;
2084 #endif
2086 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2088 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2089 return_low128(o->out);
2090 return NO_EXIT;
2093 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2095 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2096 return_low128(o->out);
2097 return NO_EXIT;
2100 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2102 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2103 return_low128(o->out);
2104 return NO_EXIT;
2107 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2109 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2110 return_low128(o->out);
2111 return NO_EXIT;
2114 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2116 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2117 return NO_EXIT;
2120 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2122 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2123 return NO_EXIT;
2126 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2128 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2129 return_low128(o->out2);
2130 return NO_EXIT;
2133 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2135 int r2 = get_field(s->fields, r2);
2136 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2137 return NO_EXIT;
2140 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2142 /* No cache information provided. */
2143 tcg_gen_movi_i64(o->out, -1);
2144 return NO_EXIT;
2147 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2149 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2150 return NO_EXIT;
2153 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2155 int r1 = get_field(s->fields, r1);
2156 int r2 = get_field(s->fields, r2);
2157 TCGv_i64 t = tcg_temp_new_i64();
2159 /* Note the "subsequently" in the PoO, which implies a defined result
2160 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2161 tcg_gen_shri_i64(t, psw_mask, 32);
2162 store_reg32_i64(r1, t);
2163 if (r2 != 0) {
2164 store_reg32_i64(r2, psw_mask);
2167 tcg_temp_free_i64(t);
2168 return NO_EXIT;
2171 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2173 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2174 tb->flags, (ab)use the tb->cs_base field as the address of
2175 the template in memory, and grab 8 bits of tb->flags/cflags for
2176 the contents of the register. We would then recognize all this
2177 in gen_intermediate_code_internal, generating code for exactly
2178 one instruction. This new TB then gets executed normally.
2180 On the other hand, this seems to be mostly used for modifying
2181 MVC inside of memcpy, which needs a helper call anyway. So
2182 perhaps this doesn't bear thinking about any further. */
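/* As written, everything funnels through helper_ex below; presumably
   (an assumption, not verified here) that helper only interprets the
   handful of target opcodes EXECUTE is commonly used with and aborts
   on anything else.  */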
2184 TCGv_i64 tmp;
2186 update_psw_addr(s);
2187 gen_op_calc_cc(s);
2189 tmp = tcg_const_i64(s->next_pc);
2190 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2191 tcg_temp_free_i64(tmp);
2193 return NO_EXIT;
2196 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2198 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2199 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2200 tcg_temp_free_i32(m3);
2201 return NO_EXIT;
2204 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2206 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2207 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2208 tcg_temp_free_i32(m3);
2209 return NO_EXIT;
2212 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2214 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2215 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2216 return_low128(o->out2);
2217 tcg_temp_free_i32(m3);
2218 return NO_EXIT;
2221 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2223 /* We'll use the original input for cc computation, since we get to
2224 compare that against 0, which ought to be better than comparing
2225 the real output against 64. It also lets cc_dst be a convenient
2226 temporary during our computation. */
2227 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2229 /* R1 = IN ? CLZ(IN) : 64. */
2230 gen_helper_clz(o->out, o->in2);
2232 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2233 value by 64, which is undefined. But since the shift is 64 iff the
2234 input is zero, we still get the correct result after and'ing. */
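/* Worked example (illustrative): IN = 0x0008000000000000 gives
   CLZ = 12, so R1 = 12 and 0x8000000000000000ull >> 12 isolates the
   found bit; the andc then yields R1+1 = 0.  For IN = 0 the
   (undefined) shift by 64 is harmless because and'ing with IN = 0
   gives 0 regardless.  */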
2235 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2236 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2237 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2238 return NO_EXIT;
2241 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2243 int m3 = get_field(s->fields, m3);
2244 int pos, len, base = s->insn->data;
2245 TCGv_i64 tmp = tcg_temp_new_i64();
2246 uint64_t ccm;
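/* CCM accumulates the mask of bits actually inserted, so that
   CC_OP_ICM can compute the condition code from R1 & CCM alone;
   e.g. m3 = 0x6 with base 0 inserts at bits 8-23, CCM = 0x00ffff00.  */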
2248 switch (m3) {
2249 case 0xf:
2250 /* Effectively a 32-bit load. */
2251 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2252 len = 32;
2253 goto one_insert;
2255 case 0xc:
2256 case 0x6:
2257 case 0x3:
2258 /* Effectively a 16-bit load. */
2259 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2260 len = 16;
2261 goto one_insert;
2263 case 0x8:
2264 case 0x4:
2265 case 0x2:
2266 case 0x1:
2267 /* Effectively an 8-bit load. */
2268 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2269 len = 8;
2270 goto one_insert;
2272 one_insert:
2273 pos = base + ctz32(m3) * 8;
2274 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2275 ccm = ((1ull << len) - 1) << pos;
2276 break;
2278 default:
2279 /* This is going to be a sequence of loads and inserts. */
2280 pos = base + 32 - 8;
2281 ccm = 0;
2282 while (m3) {
2283 if (m3 & 0x8) {
2284 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2285 tcg_gen_addi_i64(o->in2, o->in2, 1);
2286 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2287 ccm |= 0xffull << pos; /* 0xffull: pos can reach 56 for ICMH */
2289 m3 = (m3 << 1) & 0xf;
2290 pos -= 8;
2292 break;
2295 tcg_gen_movi_i64(tmp, ccm);
2296 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2297 tcg_temp_free_i64(tmp);
2298 return NO_EXIT;
2301 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2303 int shift = s->insn->data & 0xff;
2304 int size = s->insn->data >> 8;
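/* insn->data packs (size << 8) | shift; e.g. IIHH (data 0x1030)
   deposits a 16-bit immediate at bit 48.  */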
2305 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2306 return NO_EXIT;
2309 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2311 TCGv_i64 t1;
2313 gen_op_calc_cc(s);
2314 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2316 t1 = tcg_temp_new_i64();
2317 tcg_gen_shli_i64(t1, psw_mask, 20);
2318 tcg_gen_shri_i64(t1, t1, 36);
2319 tcg_gen_or_i64(o->out, o->out, t1);
2321 tcg_gen_extu_i32_i64(t1, cc_op);
2322 tcg_gen_shli_i64(t1, t1, 28);
2323 tcg_gen_or_i64(o->out, o->out, t1);
2324 tcg_temp_free_i64(t1);
2325 return NO_EXIT;
2328 #ifndef CONFIG_USER_ONLY
2329 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2331 check_privileged(s);
2332 gen_helper_ipte(cpu_env, o->in1, o->in2);
2333 return NO_EXIT;
2336 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2338 check_privileged(s);
2339 gen_helper_iske(o->out, cpu_env, o->in2);
2340 return NO_EXIT;
2342 #endif
2344 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2346 gen_helper_ldeb(o->out, cpu_env, o->in2);
2347 return NO_EXIT;
2350 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2352 gen_helper_ledb(o->out, cpu_env, o->in2);
2353 return NO_EXIT;
2356 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2358 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2359 return NO_EXIT;
2362 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2364 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2365 return NO_EXIT;
2368 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2370 gen_helper_lxdb(o->out, cpu_env, o->in2);
2371 return_low128(o->out2);
2372 return NO_EXIT;
2375 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2377 gen_helper_lxeb(o->out, cpu_env, o->in2);
2378 return_low128(o->out2);
2379 return NO_EXIT;
2382 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2384 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2385 return NO_EXIT;
2388 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2390 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2391 return NO_EXIT;
2394 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2396 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2397 return NO_EXIT;
2400 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2402 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2403 return NO_EXIT;
2406 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2408 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2409 return NO_EXIT;
2412 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2414 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2415 return NO_EXIT;
2418 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2420 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2421 return NO_EXIT;
2424 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2426 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2427 return NO_EXIT;
2430 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2432 TCGLabel *lab = gen_new_label();
2433 store_reg32_i64(get_field(s->fields, r1), o->in2);
2434 /* The value is stored even in case of trap. */
2435 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2436 gen_trap(s);
2437 gen_set_label(lab);
2438 return NO_EXIT;
2441 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2443 TCGLabel *lab = gen_new_label();
2444 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2445 /* The value is stored even in case of trap. */
2446 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2447 gen_trap(s);
2448 gen_set_label(lab);
2449 return NO_EXIT;
2452 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2454 TCGLabel *lab = gen_new_label();
2455 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2456 /* The value is stored even in case of trap. */
2457 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2458 gen_trap(s);
2459 gen_set_label(lab);
2460 return NO_EXIT;
2463 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2465 TCGLabel *lab = gen_new_label();
2466 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2467 /* The value is stored even in case of trap. */
2468 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2469 gen_trap(s);
2470 gen_set_label(lab);
2471 return NO_EXIT;
2474 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2476 TCGLabel *lab = gen_new_label();
2477 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2478 /* The value is stored even in case of trap. */
2479 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2480 gen_trap(s);
2481 gen_set_label(lab);
2482 return NO_EXIT;
2485 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2487 DisasCompare c;
2489 disas_jcc(s, &c, get_field(s->fields, m3));
2491 if (c.is_64) {
2492 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2493 o->in2, o->in1);
2494 free_compare(&c);
2495 } else {
2496 TCGv_i32 t32 = tcg_temp_new_i32();
2497 TCGv_i64 t, z;
2499 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2500 free_compare(&c);
2502 t = tcg_temp_new_i64();
2503 tcg_gen_extu_i32_i64(t, t32);
2504 tcg_temp_free_i32(t32);
2506 z = tcg_const_i64(0);
2507 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2508 tcg_temp_free_i64(t);
2509 tcg_temp_free_i64(z);
2512 return NO_EXIT;
2515 #ifndef CONFIG_USER_ONLY
2516 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2518 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2519 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2520 check_privileged(s);
2521 potential_page_fault(s);
2522 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2523 tcg_temp_free_i32(r1);
2524 tcg_temp_free_i32(r3);
2525 return NO_EXIT;
2528 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2530 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2531 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2532 check_privileged(s);
2533 potential_page_fault(s);
2534 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2535 tcg_temp_free_i32(r1);
2536 tcg_temp_free_i32(r3);
2537 return NO_EXIT;
2539 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2541 check_privileged(s);
2542 potential_page_fault(s);
2543 gen_helper_lra(o->out, cpu_env, o->in2);
2544 set_cc_static(s);
2545 return NO_EXIT;
2548 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2550 TCGv_i64 t1, t2;
2552 check_privileged(s);
2554 t1 = tcg_temp_new_i64();
2555 t2 = tcg_temp_new_i64();
2556 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2557 tcg_gen_addi_i64(o->in2, o->in2, 4);
2558 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2559 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2560 tcg_gen_shli_i64(t1, t1, 32);
2561 gen_helper_load_psw(cpu_env, t1, t2);
2562 tcg_temp_free_i64(t1);
2563 tcg_temp_free_i64(t2);
2564 return EXIT_NORETURN;
2567 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2569 TCGv_i64 t1, t2;
2571 check_privileged(s);
2573 t1 = tcg_temp_new_i64();
2574 t2 = tcg_temp_new_i64();
2575 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2576 tcg_gen_addi_i64(o->in2, o->in2, 8);
2577 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2578 gen_helper_load_psw(cpu_env, t1, t2);
2579 tcg_temp_free_i64(t1);
2580 tcg_temp_free_i64(t2);
2581 return EXIT_NORETURN;
2583 #endif
2585 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2587 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2588 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2589 potential_page_fault(s);
2590 gen_helper_lam(cpu_env, r1, o->in2, r3);
2591 tcg_temp_free_i32(r1);
2592 tcg_temp_free_i32(r3);
2593 return NO_EXIT;
2596 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2598 int r1 = get_field(s->fields, r1);
2599 int r3 = get_field(s->fields, r3);
2600 TCGv_i64 t1, t2;
2602 /* Only one register to read. */
2603 t1 = tcg_temp_new_i64();
2604 if (unlikely(r1 == r3)) {
2605 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2606 store_reg32_i64(r1, t1);
2607 tcg_temp_free(t1);
2608 return NO_EXIT;
2611 /* First load the values of the first and last registers to trigger
2612 possible page faults. */
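/* If either boundary access faults, no register has been written
   yet, so the interrupted LOAD MULTIPLE restarts cleanly.  */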
2613 t2 = tcg_temp_new_i64();
2614 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2615 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2616 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2617 store_reg32_i64(r1, t1);
2618 store_reg32_i64(r3, t2);
2620 /* Only two registers to read. */
2621 if (((r1 + 1) & 15) == r3) {
2622 tcg_temp_free(t2);
2623 tcg_temp_free(t1);
2624 return NO_EXIT;
2627 /* Then load the remaining registers. Page fault can't occur. */
2628 r3 = (r3 - 1) & 15;
2629 tcg_gen_movi_i64(t2, 4);
2630 while (r1 != r3) {
2631 r1 = (r1 + 1) & 15;
2632 tcg_gen_add_i64(o->in2, o->in2, t2);
2633 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2634 store_reg32_i64(r1, t1);
2636 tcg_temp_free(t2);
2637 tcg_temp_free(t1);
2639 return NO_EXIT;
2642 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2644 int r1 = get_field(s->fields, r1);
2645 int r3 = get_field(s->fields, r3);
2646 TCGv_i64 t1, t2;
2648 /* Only one register to read. */
2649 t1 = tcg_temp_new_i64();
2650 if (unlikely(r1 == r3)) {
2651 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2652 store_reg32h_i64(r1, t1);
2653 tcg_temp_free(t1);
2654 return NO_EXIT;
2657 /* First load the values of the first and last registers to trigger
2658 possible page faults. */
2659 t2 = tcg_temp_new_i64();
2660 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2661 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2662 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2663 store_reg32h_i64(r1, t1);
2664 store_reg32h_i64(r3, t2);
2666 /* Only two registers to read. */
2667 if (((r1 + 1) & 15) == r3) {
2668 tcg_temp_free(t2);
2669 tcg_temp_free(t1);
2670 return NO_EXIT;
2673 /* Then load the remaining registers. Page fault can't occur. */
2674 r3 = (r3 - 1) & 15;
2675 tcg_gen_movi_i64(t2, 4);
2676 while (r1 != r3) {
2677 r1 = (r1 + 1) & 15;
2678 tcg_gen_add_i64(o->in2, o->in2, t2);
2679 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2680 store_reg32h_i64(r1, t1);
2682 tcg_temp_free(t2);
2683 tcg_temp_free(t1);
2685 return NO_EXIT;
2688 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2690 int r1 = get_field(s->fields, r1);
2691 int r3 = get_field(s->fields, r3);
2692 TCGv_i64 t1, t2;
2694 /* Only one register to read. */
2695 if (unlikely(r1 == r3)) {
2696 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2697 return NO_EXIT;
2700 /* First load the values of the first and last registers to trigger
2701 possible page faults. */
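/* The first value is staged through t1 so that a fault on the
   second load leaves regs[r1] untouched.  */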
2702 t1 = tcg_temp_new_i64();
2703 t2 = tcg_temp_new_i64();
2704 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2705 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2706 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2707 tcg_gen_mov_i64(regs[r1], t1);
2708 tcg_temp_free(t2);
2710 /* Only two registers to read. */
2711 if (((r1 + 1) & 15) == r3) {
2712 tcg_temp_free(t1);
2713 return NO_EXIT;
2716 /* Then load the remaining registers. Page fault can't occur. */
2717 r3 = (r3 - 1) & 15;
2718 tcg_gen_movi_i64(t1, 8);
2719 while (r1 != r3) {
2720 r1 = (r1 + 1) & 15;
2721 tcg_gen_add_i64(o->in2, o->in2, t1);
2722 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2724 tcg_temp_free(t1);
2726 return NO_EXIT;
2729 #ifndef CONFIG_USER_ONLY
2730 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2732 check_privileged(s);
2733 potential_page_fault(s);
2734 gen_helper_lura(o->out, cpu_env, o->in2);
2735 return NO_EXIT;
2738 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2740 check_privileged(s);
2741 potential_page_fault(s);
2742 gen_helper_lurag(o->out, cpu_env, o->in2);
2743 return NO_EXIT;
2745 #endif
2747 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2749 o->out = o->in2;
2750 o->g_out = o->g_in2;
2751 TCGV_UNUSED_I64(o->in2);
2752 o->g_in2 = false;
2753 return NO_EXIT;
2756 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2758 int b2 = get_field(s->fields, b2);
2759 TCGv ar1 = tcg_temp_new_i64();
2761 o->out = o->in2;
2762 o->g_out = o->g_in2;
2763 TCGV_UNUSED_I64(o->in2);
2764 o->g_in2 = false;
2766 switch (s->tb->flags & FLAG_MASK_ASC) {
2767 case PSW_ASC_PRIMARY >> 32:
2768 tcg_gen_movi_i64(ar1, 0);
2769 break;
2770 case PSW_ASC_ACCREG >> 32:
2771 tcg_gen_movi_i64(ar1, 1);
2772 break;
2773 case PSW_ASC_SECONDARY >> 32:
2774 if (b2) {
2775 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2776 } else {
2777 tcg_gen_movi_i64(ar1, 0);
2779 break;
2780 case PSW_ASC_HOME >> 32:
2781 tcg_gen_movi_i64(ar1, 2);
2782 break;
/* LAE sets the access register designated by r1, not AR1.  */
2785 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[get_field(s->fields, r1)]));
2786 tcg_temp_free_i64(ar1);
2788 return NO_EXIT;
2791 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2793 o->out = o->in1;
2794 o->out2 = o->in2;
2795 o->g_out = o->g_in1;
2796 o->g_out2 = o->g_in2;
2797 TCGV_UNUSED_I64(o->in1);
2798 TCGV_UNUSED_I64(o->in2);
2799 o->g_in1 = o->g_in2 = false;
2800 return NO_EXIT;
2803 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2805 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2806 potential_page_fault(s);
2807 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2808 tcg_temp_free_i32(l);
2809 return NO_EXIT;
2812 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2814 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2815 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2816 potential_page_fault(s);
2817 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2818 tcg_temp_free_i32(r1);
2819 tcg_temp_free_i32(r2);
2820 set_cc_static(s);
2821 return NO_EXIT;
2824 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2826 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2827 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2828 potential_page_fault(s);
2829 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2830 tcg_temp_free_i32(r1);
2831 tcg_temp_free_i32(r3);
2832 set_cc_static(s);
2833 return NO_EXIT;
2836 #ifndef CONFIG_USER_ONLY
2837 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2839 int r1 = get_field(s->fields, l1);
2840 check_privileged(s);
2841 potential_page_fault(s);
2842 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2843 set_cc_static(s);
2844 return NO_EXIT;
2847 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2849 int r1 = get_field(s->fields, l1);
2850 check_privileged(s);
2851 potential_page_fault(s);
2852 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2853 set_cc_static(s);
2854 return NO_EXIT;
2856 #endif
2858 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2860 potential_page_fault(s);
2861 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2862 set_cc_static(s);
2863 return NO_EXIT;
2866 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2868 potential_page_fault(s);
2869 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2870 set_cc_static(s);
2871 return_low128(o->in2);
2872 return NO_EXIT;
2875 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2877 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2878 return NO_EXIT;
2881 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2883 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2884 return NO_EXIT;
2887 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2889 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2890 return NO_EXIT;
2893 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2895 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2896 return NO_EXIT;
2899 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2901 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2902 return NO_EXIT;
2905 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2907 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2908 return_low128(o->out2);
2909 return NO_EXIT;
2912 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2914 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2915 return_low128(o->out2);
2916 return NO_EXIT;
2919 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2921 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2922 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2923 tcg_temp_free_i64(r3);
2924 return NO_EXIT;
2927 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2929 int r3 = get_field(s->fields, r3);
2930 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2931 return NO_EXIT;
2934 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2936 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2937 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2938 tcg_temp_free_i64(r3);
2939 return NO_EXIT;
2942 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2944 int r3 = get_field(s->fields, r3);
2945 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2946 return NO_EXIT;
2949 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2951 TCGv_i64 z, n;
2952 z = tcg_const_i64(0);
2953 n = tcg_temp_new_i64();
2954 tcg_gen_neg_i64(n, o->in2);
2955 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2956 tcg_temp_free_i64(n);
2957 tcg_temp_free_i64(z);
2958 return NO_EXIT;
2961 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2963 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2964 return NO_EXIT;
2967 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2969 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2970 return NO_EXIT;
2973 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2975 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2976 tcg_gen_mov_i64(o->out2, o->in2);
2977 return NO_EXIT;
2980 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2982 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2983 potential_page_fault(s);
2984 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2985 tcg_temp_free_i32(l);
2986 set_cc_static(s);
2987 return NO_EXIT;
2990 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2992 tcg_gen_neg_i64(o->out, o->in2);
2993 return NO_EXIT;
2996 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2998 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2999 return NO_EXIT;
3002 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3004 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3005 return NO_EXIT;
3008 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3010 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3011 tcg_gen_mov_i64(o->out2, o->in2);
3012 return NO_EXIT;
3015 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3017 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3018 potential_page_fault(s);
3019 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3020 tcg_temp_free_i32(l);
3021 set_cc_static(s);
3022 return NO_EXIT;
3025 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3027 tcg_gen_or_i64(o->out, o->in1, o->in2);
3028 return NO_EXIT;
3031 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3033 int shift = s->insn->data & 0xff;
3034 int size = s->insn->data >> 8;
3035 uint64_t mask = ((1ull << size) - 1) << shift;
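/* e.g. a 16-bit immediate at bit 32 (OIHL-style, data 0x1020) gives
   mask = 0x0000ffff00000000ull.  */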
3037 assert(!o->g_in2);
3038 tcg_gen_shli_i64(o->in2, o->in2, shift);
3039 tcg_gen_or_i64(o->out, o->in1, o->in2);
3041 /* Produce the CC from only the bits manipulated. */
3042 tcg_gen_andi_i64(cc_dst, o->out, mask);
3043 set_cc_nz_u64(s, cc_dst);
3044 return NO_EXIT;
3047 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3049 gen_helper_popcnt(o->out, o->in2);
3050 return NO_EXIT;
3053 #ifndef CONFIG_USER_ONLY
3054 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3056 check_privileged(s);
3057 gen_helper_ptlb(cpu_env);
3058 return NO_EXIT;
3060 #endif
3062 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3064 int i3 = get_field(s->fields, i3);
3065 int i4 = get_field(s->fields, i4);
3066 int i5 = get_field(s->fields, i5);
3067 int do_zero = i4 & 0x80;
3068 uint64_t mask, imask, pmask;
3069 int pos, len, rot;
3071 /* Adjust the arguments for the specific insn. */
3072 switch (s->fields->op2) {
3073 case 0x55: /* risbg */
3074 i3 &= 63;
3075 i4 &= 63;
3076 pmask = ~0;
3077 break;
3078 case 0x5d: /* risbhg */
3079 i3 &= 31;
3080 i4 &= 31;
3081 pmask = 0xffffffff00000000ull;
3082 break;
3083 case 0x51: /* risblg */
3084 i3 &= 31;
3085 i4 &= 31;
3086 pmask = 0x00000000ffffffffull;
3087 break;
3088 default:
3089 abort();
3092 /* MASK is the set of bits to be inserted from R2.
3093 Take care for I3/I4 wraparound. */
3094 mask = pmask >> i3;
3095 if (i3 <= i4) {
3096 mask ^= pmask >> i4 >> 1;
3097 } else {
3098 mask |= ~(pmask >> i4 >> 1);
3100 mask &= pmask;
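/* Worked example for RISBG (pmask = ~0): i3 = 8, i4 = 55 selects
   bits 8-55 in PoO (msb-first) numbering: mask = 0x00ffffffffffff00.
   The wrapped case i3 = 56, i4 = 7 instead gives 0xff000000000000ff.  */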
3102 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3103 insns, we need to keep the other half of the register. */
3104 imask = ~mask | ~pmask;
3105 if (do_zero) {
3106 if (s->fields->op2 == 0x55) {
3107 imask = 0;
3108 } else {
3109 imask = ~pmask;
3113 /* In some cases we can implement this with deposit, which can be more
3114 efficient on some hosts. */
3115 if (~mask == imask && i3 <= i4) {
3116 if (s->fields->op2 == 0x5d) {
3117 i3 += 32, i4 += 32;
3119 /* Note that we rotate the bits to be inserted to the lsb, not to
3120 the position as described in the PoO. */
3121 len = i4 - i3 + 1;
3122 pos = 63 - i4;
3123 rot = (i5 - pos) & 63;
3124 } else {
3125 pos = len = -1;
3126 rot = i5 & 63;
3129 /* Rotate the input as necessary. */
3130 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3132 /* Insert the selected bits into the output. */
3133 if (pos >= 0) {
3134 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3135 } else if (imask == 0) {
3136 tcg_gen_andi_i64(o->out, o->in2, mask);
3137 } else {
3138 tcg_gen_andi_i64(o->in2, o->in2, mask);
3139 tcg_gen_andi_i64(o->out, o->out, imask);
3140 tcg_gen_or_i64(o->out, o->out, o->in2);
3142 return NO_EXIT;
3145 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3147 int i3 = get_field(s->fields, i3);
3148 int i4 = get_field(s->fields, i4);
3149 int i5 = get_field(s->fields, i5);
3150 uint64_t mask;
3152 /* If this is a test-only form, arrange to discard the result. */
3153 if (i3 & 0x80) {
3154 o->out = tcg_temp_new_i64();
3155 o->g_out = false;
3158 i3 &= 63;
3159 i4 &= 63;
3160 i5 &= 63;
3162 /* MASK is the set of bits to be operated on from R2.
3163 Take care for I3/I4 wraparound. */
3164 mask = ~0ull >> i3;
3165 if (i3 <= i4) {
3166 mask ^= ~0ull >> i4 >> 1;
3167 } else {
3168 mask |= ~(~0ull >> i4 >> 1);
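/* e.g. i3 = 40, i4 = 43 (PoO numbering) yields
   mask = 0x0000000000f00000.  */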
3171 /* Rotate the input as necessary. */
3172 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3174 /* Operate. */
3175 switch (s->fields->op2) {
3176 case 0x55: /* AND */
3177 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3178 tcg_gen_and_i64(o->out, o->out, o->in2);
3179 break;
3180 case 0x56: /* OR */
3181 tcg_gen_andi_i64(o->in2, o->in2, mask);
3182 tcg_gen_or_i64(o->out, o->out, o->in2);
3183 break;
3184 case 0x57: /* XOR */
3185 tcg_gen_andi_i64(o->in2, o->in2, mask);
3186 tcg_gen_xor_i64(o->out, o->out, o->in2);
3187 break;
3188 default:
3189 abort();
3192 /* Set the CC. */
3193 tcg_gen_andi_i64(cc_dst, o->out, mask);
3194 set_cc_nz_u64(s, cc_dst);
3195 return NO_EXIT;
3198 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3200 tcg_gen_bswap16_i64(o->out, o->in2);
3201 return NO_EXIT;
3204 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3206 tcg_gen_bswap32_i64(o->out, o->in2);
3207 return NO_EXIT;
3210 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3212 tcg_gen_bswap64_i64(o->out, o->in2);
3213 return NO_EXIT;
3216 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3218 TCGv_i32 t1 = tcg_temp_new_i32();
3219 TCGv_i32 t2 = tcg_temp_new_i32();
3220 TCGv_i32 to = tcg_temp_new_i32();
3221 tcg_gen_trunc_i64_i32(t1, o->in1);
3222 tcg_gen_trunc_i64_i32(t2, o->in2);
3223 tcg_gen_rotl_i32(to, t1, t2);
3224 tcg_gen_extu_i32_i64(o->out, to);
3225 tcg_temp_free_i32(t1);
3226 tcg_temp_free_i32(t2);
3227 tcg_temp_free_i32(to);
3228 return NO_EXIT;
3231 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3233 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3234 return NO_EXIT;
3237 #ifndef CONFIG_USER_ONLY
3238 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3240 check_privileged(s);
3241 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3242 set_cc_static(s);
3243 return NO_EXIT;
3246 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3248 check_privileged(s);
3249 gen_helper_sacf(cpu_env, o->in2);
3250 /* Addressing mode has changed, so end the block. */
3251 return EXIT_PC_STALE;
3253 #endif
3255 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3257 int sam = s->insn->data;
3258 TCGv_i64 tsam;
3259 uint64_t mask;
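/* insn->data is 0 for SAM24, 1 for SAM31 and 3 for SAM64; the mask
   is the corresponding addressable range.  */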
3261 switch (sam) {
3262 case 0:
3263 mask = 0xffffff;
3264 break;
3265 case 1:
3266 mask = 0x7fffffff;
3267 break;
3268 default:
3269 mask = -1;
3270 break;
3273 /* Bizarre but true, we check the address of the current insn for the
3274 specification exception, not the next to be executed. Thus the PoO
3275 documents that Bad Things Happen two bytes before the end. */
3276 if (s->pc & ~mask) {
3277 gen_program_exception(s, PGM_SPECIFICATION);
3278 return EXIT_NORETURN;
3280 s->next_pc &= mask;
3282 tsam = tcg_const_i64(sam);
3283 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3284 tcg_temp_free_i64(tsam);
3286 /* Always exit the TB, since we (may have) changed execution mode. */
3287 return EXIT_PC_STALE;
3290 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3292 int r1 = get_field(s->fields, r1);
3293 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3294 return NO_EXIT;
3297 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3299 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3300 return NO_EXIT;
3303 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3305 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3306 return NO_EXIT;
3309 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3311 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3312 return_low128(o->out2);
3313 return NO_EXIT;
3316 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3318 gen_helper_sqeb(o->out, cpu_env, o->in2);
3319 return NO_EXIT;
3322 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3324 gen_helper_sqdb(o->out, cpu_env, o->in2);
3325 return NO_EXIT;
3328 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3330 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3331 return_low128(o->out2);
3332 return NO_EXIT;
3335 #ifndef CONFIG_USER_ONLY
3336 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3338 check_privileged(s);
3339 potential_page_fault(s);
3340 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3341 set_cc_static(s);
3342 return NO_EXIT;
3345 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3347 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3348 check_privileged(s);
3349 potential_page_fault(s);
3350 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3351 tcg_temp_free_i32(r1);
3352 return NO_EXIT;
3354 #endif
3356 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3358 DisasCompare c;
3359 TCGv_i64 a;
3360 TCGLabel *lab;
3361 int r1;
3363 disas_jcc(s, &c, get_field(s->fields, m3));
3365 /* We want to store when the condition is fulfilled, so branch
3366 out when it's not. */
3367 c.cond = tcg_invert_cond(c.cond);
3369 lab = gen_new_label();
3370 if (c.is_64) {
3371 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3372 } else {
3373 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3375 free_compare(&c);
3377 r1 = get_field(s->fields, r1);
3378 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3379 if (s->insn->data) {
3380 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3381 } else {
3382 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3384 tcg_temp_free_i64(a);
3386 gen_set_label(lab);
3387 return NO_EXIT;
3390 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3392 uint64_t sign = 1ull << s->insn->data;
3393 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3394 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3395 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3396 /* The arithmetic left shift is curious in that it does not affect
3397 the sign bit. Copy that over from the source unchanged. */
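/* e.g. a 32-bit SLA of 0x40000000 by 1: the raw shift gives
   0x80000000, the old (zero) sign bit is copied back in, and the
   result is 0, with CC_OP_SLA_32 flagging the overflow.  */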
3398 tcg_gen_andi_i64(o->out, o->out, ~sign);
3399 tcg_gen_andi_i64(o->in1, o->in1, sign);
3400 tcg_gen_or_i64(o->out, o->out, o->in1);
3401 return NO_EXIT;
3404 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3406 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3407 return NO_EXIT;
3410 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3412 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3413 return NO_EXIT;
3416 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3418 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3419 return NO_EXIT;
3422 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3424 gen_helper_sfpc(cpu_env, o->in2);
3425 return NO_EXIT;
3428 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3430 gen_helper_sfas(cpu_env, o->in2);
3431 return NO_EXIT;
3434 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3436 int b2 = get_field(s->fields, b2);
3437 int d2 = get_field(s->fields, d2);
3438 TCGv_i64 t1 = tcg_temp_new_i64();
3439 TCGv_i64 t2 = tcg_temp_new_i64();
3440 int mask, pos, len;
3442 switch (s->fields->op2) {
3443 case 0x99: /* SRNM */
3444 pos = 0, len = 2;
3445 break;
3446 case 0xb8: /* SRNMB */
3447 pos = 0, len = 3;
3448 break;
3449 case 0xb9: /* SRNMT */
3450 pos = 4, len = 3;
3451 break;
3452 default:
3453 tcg_abort();
3455 mask = (1 << len) - 1;
3457 /* Insert the value into the appropriate field of the FPC. */
3458 if (b2 == 0) {
3459 tcg_gen_movi_i64(t1, d2 & mask);
3460 } else {
3461 tcg_gen_addi_i64(t1, regs[b2], d2);
3462 tcg_gen_andi_i64(t1, t1, mask);
3464 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3465 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3466 tcg_temp_free_i64(t1);
3468 /* Then install the new FPC to set the rounding mode in fpu_status. */
3469 gen_helper_sfpc(cpu_env, t2);
3470 tcg_temp_free_i64(t2);
3471 return NO_EXIT;
3474 #ifndef CONFIG_USER_ONLY
3475 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3477 check_privileged(s);
3478 tcg_gen_shri_i64(o->in2, o->in2, 4);
3479 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3480 return NO_EXIT;
3483 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3485 check_privileged(s);
3486 gen_helper_sske(cpu_env, o->in1, o->in2);
3487 return NO_EXIT;
3490 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3492 check_privileged(s);
3493 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3494 return NO_EXIT;
3497 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3499 check_privileged(s);
3500 /* ??? Surely cpu address != cpu number. In any case the previous
3501 version of this stored more than the required half-word, so it
3502 is unlikely this has ever been tested. */
3503 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3504 return NO_EXIT;
3507 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3509 gen_helper_stck(o->out, cpu_env);
3510 /* ??? We don't implement clock states. */
3511 gen_op_movi_cc(s, 0);
3512 return NO_EXIT;
3515 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3517 TCGv_i64 c1 = tcg_temp_new_i64();
3518 TCGv_i64 c2 = tcg_temp_new_i64();
3519 gen_helper_stck(c1, cpu_env);
3520 /* Shift the 64-bit value into its place as a zero-extended
3521 104-bit value. Note that "bit positions 64-103 are always
3522 non-zero so that they compare differently to STCK"; we set
3523 the least significant bit to 1. */
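/* Illustrative layout for a clock value 0xaabbccddeeff0011:
   00 aa bb cc dd ee ff 00 | 11 00 00 00 00 01 00 00
   i.e. a zero epoch byte, the clock shifted right by 8, then the low
   clock byte and the non-zero pad bit ahead of the programmable
   field.  */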
3524 tcg_gen_shli_i64(c2, c1, 56);
3525 tcg_gen_shri_i64(c1, c1, 8);
3526 tcg_gen_ori_i64(c2, c2, 0x10000);
3527 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3528 tcg_gen_addi_i64(o->in2, o->in2, 8);
3529 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3530 tcg_temp_free_i64(c1);
3531 tcg_temp_free_i64(c2);
3532 /* ??? We don't implement clock states. */
3533 gen_op_movi_cc(s, 0);
3534 return NO_EXIT;
3537 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3539 check_privileged(s);
3540 gen_helper_sckc(cpu_env, o->in2);
3541 return NO_EXIT;
3544 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3546 check_privileged(s);
3547 gen_helper_stckc(o->out, cpu_env);
3548 return NO_EXIT;
3551 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3553 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3554 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3555 check_privileged(s);
3556 potential_page_fault(s);
3557 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3558 tcg_temp_free_i32(r1);
3559 tcg_temp_free_i32(r3);
3560 return NO_EXIT;
3563 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3565 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3566 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3567 check_privileged(s);
3568 potential_page_fault(s);
3569 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3570 tcg_temp_free_i32(r1);
3571 tcg_temp_free_i32(r3);
3572 return NO_EXIT;
3575 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3577 TCGv_i64 t1 = tcg_temp_new_i64();
3579 check_privileged(s);
3580 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3581 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3582 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3583 tcg_temp_free_i64(t1);
3585 return NO_EXIT;
3588 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3590 check_privileged(s);
3591 gen_helper_spt(cpu_env, o->in2);
3592 return NO_EXIT;
3595 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3597 TCGv_i64 f, a;
3598 /* We really ought to have more complete indication of facilities
3599 that we implement. Address this when STFLE is implemented. */
3600 check_privileged(s);
3601 f = tcg_const_i64(0xc0000000);
3602 a = tcg_const_i64(200);
3603 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3604 tcg_temp_free_i64(f);
3605 tcg_temp_free_i64(a);
3606 return NO_EXIT;
3609 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3611 check_privileged(s);
3612 gen_helper_stpt(o->out, cpu_env);
3613 return NO_EXIT;
3616 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3618 check_privileged(s);
3619 potential_page_fault(s);
3620 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3621 set_cc_static(s);
3622 return NO_EXIT;
3625 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3627 check_privileged(s);
3628 gen_helper_spx(cpu_env, o->in2);
3629 return NO_EXIT;
3632 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3634 check_privileged(s);
3635 potential_page_fault(s);
3636 gen_helper_xsch(cpu_env, regs[1]);
3637 set_cc_static(s);
3638 return NO_EXIT;
3641 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3643 check_privileged(s);
3644 potential_page_fault(s);
3645 gen_helper_csch(cpu_env, regs[1]);
3646 set_cc_static(s);
3647 return NO_EXIT;
3650 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3652 check_privileged(s);
3653 potential_page_fault(s);
3654 gen_helper_hsch(cpu_env, regs[1]);
3655 set_cc_static(s);
3656 return NO_EXIT;
3659 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3661 check_privileged(s);
3662 potential_page_fault(s);
3663 gen_helper_msch(cpu_env, regs[1], o->in2);
3664 set_cc_static(s);
3665 return NO_EXIT;
3668 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3670 check_privileged(s);
3671 potential_page_fault(s);
3672 gen_helper_rchp(cpu_env, regs[1]);
3673 set_cc_static(s);
3674 return NO_EXIT;
3677 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3679 check_privileged(s);
3680 potential_page_fault(s);
3681 gen_helper_rsch(cpu_env, regs[1]);
3682 set_cc_static(s);
3683 return NO_EXIT;
3686 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3688 check_privileged(s);
3689 potential_page_fault(s);
3690 gen_helper_ssch(cpu_env, regs[1], o->in2);
3691 set_cc_static(s);
3692 return NO_EXIT;
3695 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3697 check_privileged(s);
3698 potential_page_fault(s);
3699 gen_helper_stsch(cpu_env, regs[1], o->in2);
3700 set_cc_static(s);
3701 return NO_EXIT;
3704 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3706 check_privileged(s);
3707 potential_page_fault(s);
3708 gen_helper_tsch(cpu_env, regs[1], o->in2);
3709 set_cc_static(s);
3710 return NO_EXIT;
3713 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3715 check_privileged(s);
3716 potential_page_fault(s);
3717 gen_helper_chsc(cpu_env, o->in2);
3718 set_cc_static(s);
3719 return NO_EXIT;
3722 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3724 check_privileged(s);
3725 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3726 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3727 return NO_EXIT;
3730 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3732 uint64_t i2 = get_field(s->fields, i2);
3733 TCGv_i64 t;
3735 check_privileged(s);
3737 /* It is important to do what the instruction name says: STORE THEN.
3738 If we let the output hook perform the store then if we fault and
3739 restart, we'll have the wrong SYSTEM MASK in place. */
3740 t = tcg_temp_new_i64();
3741 tcg_gen_shri_i64(t, psw_mask, 56);
3742 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3743 tcg_temp_free_i64(t);
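/* Opcode 0xac is STNSM (AND the immediate into the system mask);
   0xad is STOSM (OR it in).  */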
3745 if (s->fields->op == 0xac) {
3746 tcg_gen_andi_i64(psw_mask, psw_mask,
3747 (i2 << 56) | 0x00ffffffffffffffull);
3748 } else {
3749 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3751 return NO_EXIT;
3754 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3756 check_privileged(s);
3757 potential_page_fault(s);
3758 gen_helper_stura(cpu_env, o->in2, o->in1);
3759 return NO_EXIT;
3762 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3764 check_privileged(s);
3765 potential_page_fault(s);
3766 gen_helper_sturg(cpu_env, o->in2, o->in1);
3767 return NO_EXIT;
3769 #endif
3771 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3773 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3774 return NO_EXIT;
3777 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3779 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3780 return NO_EXIT;
3783 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3785 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3786 return NO_EXIT;
3789 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3791 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3792 return NO_EXIT;
3795 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3797 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3798 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3799 potential_page_fault(s);
3800 gen_helper_stam(cpu_env, r1, o->in2, r3);
3801 tcg_temp_free_i32(r1);
3802 tcg_temp_free_i32(r3);
3803 return NO_EXIT;
3806 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3808 int m3 = get_field(s->fields, m3);
3809 int pos, base = s->insn->data;
3810 TCGv_i64 tmp = tcg_temp_new_i64();
3812 pos = base + ctz32(m3) * 8;
3813 switch (m3) {
3814 case 0xf:
3815 /* Effectively a 32-bit store. */
3816 tcg_gen_shri_i64(tmp, o->in1, pos);
3817 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3818 break;
3820 case 0xc:
3821 case 0x6:
3822 case 0x3:
3823 /* Effectively a 16-bit store. */
3824 tcg_gen_shri_i64(tmp, o->in1, pos);
3825 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3826 break;
3828 case 0x8:
3829 case 0x4:
3830 case 0x2:
3831 case 0x1:
3832 /* Effectively an 8-bit store. */
3833 tcg_gen_shri_i64(tmp, o->in1, pos);
3834 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3835 break;
3837 default:
3838 /* This is going to be a sequence of shifts and stores. */
3839 pos = base + 32 - 8;
3840 while (m3) {
3841 if (m3 & 0x8) {
3842 tcg_gen_shri_i64(tmp, o->in1, pos);
3843 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3844 tcg_gen_addi_i64(o->in2, o->in2, 1);
3846 m3 = (m3 << 1) & 0xf;
3847 pos -= 8;
3849 break;
3851 tcg_temp_free_i64(tmp);
3852 return NO_EXIT;
3855 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3857 int r1 = get_field(s->fields, r1);
3858 int r3 = get_field(s->fields, r3);
3859 int size = s->insn->data;
3860 TCGv_i64 tsize = tcg_const_i64(size);
3862 while (1) {
3863 if (size == 8) {
3864 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3865 } else {
3866 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3868 if (r1 == r3) {
3869 break;
3871 tcg_gen_add_i64(o->in2, o->in2, tsize);
3872 r1 = (r1 + 1) & 15;
3875 tcg_temp_free_i64(tsize);
3876 return NO_EXIT;
3879 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3881 int r1 = get_field(s->fields, r1);
3882 int r3 = get_field(s->fields, r3);
3883 TCGv_i64 t = tcg_temp_new_i64();
3884 TCGv_i64 t4 = tcg_const_i64(4);
3885 TCGv_i64 t32 = tcg_const_i64(32);
3887 while (1) {
3888 tcg_gen_shl_i64(t, regs[r1], t32);
3889 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3890 if (r1 == r3) {
3891 break;
3893 tcg_gen_add_i64(o->in2, o->in2, t4);
3894 r1 = (r1 + 1) & 15;
3897 tcg_temp_free_i64(t);
3898 tcg_temp_free_i64(t4);
3899 tcg_temp_free_i64(t32);
3900 return NO_EXIT;
3903 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3905 potential_page_fault(s);
3906 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3907 set_cc_static(s);
3908 return_low128(o->in2);
3909 return NO_EXIT;
3912 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3914 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3915 return NO_EXIT;
3918 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3920 DisasCompare cmp;
3921 TCGv_i64 borrow;
3923 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3925 /* The !borrow flag is the msb of CC. Since we want the inverse of
3926 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
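/* I.e. the setcond below yields 1 exactly when CC is 0 or 1 (a
   borrow occurred), and that 1 is subtracted from the raw
   difference.  */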
3927 disas_jcc(s, &cmp, 8 | 4);
3928 borrow = tcg_temp_new_i64();
3929 if (cmp.is_64) {
3930 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3931 } else {
3932 TCGv_i32 t = tcg_temp_new_i32();
3933 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3934 tcg_gen_extu_i32_i64(borrow, t);
3935 tcg_temp_free_i32(t);
3937 free_compare(&cmp);
3939 tcg_gen_sub_i64(o->out, o->out, borrow);
3940 tcg_temp_free_i64(borrow);
3941 return NO_EXIT;
3944 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3946 TCGv_i32 t;
3948 update_psw_addr(s);
3949 update_cc_op(s);
3951 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3952 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3953 tcg_temp_free_i32(t);
3955 t = tcg_const_i32(s->next_pc - s->pc);
3956 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3957 tcg_temp_free_i32(t);
3959 gen_exception(EXCP_SVC);
3960 return EXIT_NORETURN;
3963 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3965 gen_helper_tceb(cc_op, o->in1, o->in2);
3966 set_cc_static(s);
3967 return NO_EXIT;
3970 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3972 gen_helper_tcdb(cc_op, o->in1, o->in2);
3973 set_cc_static(s);
3974 return NO_EXIT;
3977 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3979 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3980 set_cc_static(s);
3981 return NO_EXIT;
3984 #ifndef CONFIG_USER_ONLY
3985 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3987 potential_page_fault(s);
3988 gen_helper_tprot(cc_op, o->addr1, o->in2);
3989 set_cc_static(s);
3990 return NO_EXIT;
3992 #endif
3994 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3996 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3997 potential_page_fault(s);
3998 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3999 tcg_temp_free_i32(l);
4000 set_cc_static(s);
4001 return NO_EXIT;
4004 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4006 potential_page_fault(s);
4007 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4008 return_low128(o->out2);
4009 set_cc_static(s);
4010 return NO_EXIT;
4013 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4015 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4016 potential_page_fault(s);
4017 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4018 tcg_temp_free_i32(l);
4019 set_cc_static(s);
4020 return NO_EXIT;
4023 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4025 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4026 potential_page_fault(s);
4027 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4028 tcg_temp_free_i32(l);
4029 return NO_EXIT;
4032 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4034 int d1 = get_field(s->fields, d1);
4035 int d2 = get_field(s->fields, d2);
4036 int b1 = get_field(s->fields, b1);
4037 int b2 = get_field(s->fields, b2);
4038 int l = get_field(s->fields, l1);
4039 TCGv_i32 t32;
4041 o->addr1 = get_address(s, 0, b1, d1);
4043 /* If the addresses are identical, this is a store/memset of zero. */
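/* XC of a field with itself is the classic s390 clear idiom,
   e.g. "xc 0(8,%r1),0(%r1)" zeroes the 8 bytes at 0(%r1).  */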
4044 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4045 o->in2 = tcg_const_i64(0);
4047 l++;
4048 while (l >= 8) {
4049 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4050 l -= 8;
4051 if (l > 0) {
4052 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4055 if (l >= 4) {
4056 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4057 l -= 4;
4058 if (l > 0) {
4059 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4062 if (l >= 2) {
4063 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4064 l -= 2;
4065 if (l > 0) {
4066 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4069 if (l) {
4070 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4072 gen_op_movi_cc(s, 0);
4073 return NO_EXIT;
4076 /* But in general we'll defer to a helper. */
4077 o->in2 = get_address(s, 0, b2, d2);
4078 t32 = tcg_const_i32(l);
4079 potential_page_fault(s);
4080 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4081 tcg_temp_free_i32(t32);
4082 set_cc_static(s);
4083 return NO_EXIT;
4086 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4088 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4089 return NO_EXIT;
4092 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4094 int shift = s->insn->data & 0xff;
4095 int size = s->insn->data >> 8;
4096 uint64_t mask = ((1ull << size) - 1) << shift;
4098 assert(!o->g_in2);
4099 tcg_gen_shli_i64(o->in2, o->in2, shift);
4100 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4102 /* Produce the CC from only the bits manipulated. */
4103 tcg_gen_andi_i64(cc_dst, o->out, mask);
4104 set_cc_nz_u64(s, cc_dst);
4105 return NO_EXIT;
4108 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4110 o->out = tcg_const_i64(0);
4111 return NO_EXIT;
4114 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4116 o->out = tcg_const_i64(0);
4117 o->out2 = o->out;
4118 o->g_out2 = true;
4119 return NO_EXIT;
4122 /* ====================================================================== */
4123 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4124 the original inputs), update the various cc data structures in order to
4125 be able to compute the new condition code. */
4127 static void cout_abs32(DisasContext *s, DisasOps *o)
4129 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4132 static void cout_abs64(DisasContext *s, DisasOps *o)
4134 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4137 static void cout_adds32(DisasContext *s, DisasOps *o)
4139 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4142 static void cout_adds64(DisasContext *s, DisasOps *o)
4144 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4147 static void cout_addu32(DisasContext *s, DisasOps *o)
4149 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4152 static void cout_addu64(DisasContext *s, DisasOps *o)
4154 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4157 static void cout_addc32(DisasContext *s, DisasOps *o)
4159 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4162 static void cout_addc64(DisasContext *s, DisasOps *o)
4164 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4167 static void cout_cmps32(DisasContext *s, DisasOps *o)
4169 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4172 static void cout_cmps64(DisasContext *s, DisasOps *o)
4174 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4177 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4179 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4182 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4184 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4187 static void cout_f32(DisasContext *s, DisasOps *o)
4189 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4192 static void cout_f64(DisasContext *s, DisasOps *o)
4194 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4197 static void cout_f128(DisasContext *s, DisasOps *o)
4199 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4202 static void cout_nabs32(DisasContext *s, DisasOps *o)
4204 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4207 static void cout_nabs64(DisasContext *s, DisasOps *o)
4209 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4212 static void cout_neg32(DisasContext *s, DisasOps *o)
4214 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4217 static void cout_neg64(DisasContext *s, DisasOps *o)
4219 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4222 static void cout_nz32(DisasContext *s, DisasOps *o)
4224 tcg_gen_ext32u_i64(cc_dst, o->out);
4225 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4228 static void cout_nz64(DisasContext *s, DisasOps *o)
4230 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4233 static void cout_s32(DisasContext *s, DisasOps *o)
4235 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4238 static void cout_s64(DisasContext *s, DisasOps *o)
4240 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4243 static void cout_subs32(DisasContext *s, DisasOps *o)
4245 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4248 static void cout_subs64(DisasContext *s, DisasOps *o)
4250 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4253 static void cout_subu32(DisasContext *s, DisasOps *o)
4255 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4258 static void cout_subu64(DisasContext *s, DisasOps *o)
4260 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4263 static void cout_subb32(DisasContext *s, DisasOps *o)
4265 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4268 static void cout_subb64(DisasContext *s, DisasOps *o)
4270 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4273 static void cout_tm32(DisasContext *s, DisasOps *o)
4275 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4278 static void cout_tm64(DisasContext *s, DisasOps *o)
4280 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4283 /* ====================================================================== */
4284 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4285 with the TCG register to which we will write. Used in combination with
4286 the "wout" generators, in some cases we need a new temporary, and in
4287 some cases we can write to a TCG global. */
4289 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4291 o->out = tcg_temp_new_i64();
4293 #define SPEC_prep_new 0
4295 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4297 o->out = tcg_temp_new_i64();
4298 o->out2 = tcg_temp_new_i64();
4300 #define SPEC_prep_new_P 0
4302 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4304 o->out = regs[get_field(f, r1)];
4305 o->g_out = true;
4307 #define SPEC_prep_r1 0
4309 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4311 int r1 = get_field(f, r1);
4312 o->out = regs[r1];
4313 o->out2 = regs[r1 + 1];
4314 o->g_out = o->g_out2 = true;
4316 #define SPEC_prep_r1_P SPEC_r1_even
4318 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4320 o->out = fregs[get_field(f, r1)];
4321 o->g_out = true;
4323 #define SPEC_prep_f1 0
4325 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4327 int r1 = get_field(f, r1);
4328 o->out = fregs[r1];
4329 o->out2 = fregs[r1 + 2];
4330 o->g_out = o->g_out2 = true;
4332 #define SPEC_prep_x1 SPEC_r1_f128
4334 /* ====================================================================== */
4335 /* The "Write OUTput" generators. These generally perform some non-trivial
4336 copy of data to TCG globals, or to main memory. The trivial cases are
4337 generally handled by having a "prep" generator install the TCG global
4338 as the destination of the operation. */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_32_r1_atomic 0

static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_64_r1_atomic 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0
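
/* Note (added commentary, not in the original source): in2_a2 above forms
   the usual base + index + displacement effective address.  For a
   hypothetical "L %r1,8(%r2,%r3)", x2 = 2 and b2 = 3, so the generated
   address is regs[3] + regs[2] + 8, with get_address() applying the
   truncation required by the current 24/31/64-bit addressing mode.  */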
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_32s_atomic 0

static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_64_atomic 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                      \
    .opc = OPC,                                                            \
    .fmt = FMT_##FT,                                                       \
    .fac = FAC_##FC,                                                       \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM,                                                           \
    .help_in1 = in1_##I1,                                                  \
    .help_in2 = in2_##I2,                                                  \
    .help_prep = prep_##P,                                                 \
    .help_wout = wout_##W,                                                 \
    .help_cout = cout_##CC,                                                \
    .help_op = op_##OP,                                                    \
    .data = D                                                              \
},
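
/* Illustrative expansion (added commentary, not in the original source):
   an insn-data.def line like

       C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)

   becomes a DisasInsn with .opc = 0x1a00, .name = "AR", .help_in1 = in1_r1,
   .help_in2 = in2_r2_32s, .help_prep = prep_new, .help_wout = wout_r1_32,
   .help_cout = cout_adds32, .help_op = op_add and .data = 0.  */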
/* Allow 0 to be used for NULL in the table below.  */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
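
/* Worked example (added commentary, with made-up field parameters): for an
   unsigned 4-bit field at big-endian bit 8 of the left-aligned insn image,
   extract_field() computes

       r = (insn << 8) >> (64 - 4);

   the left shift discards the bits above the field, and the right shift
   drops everything below it, leaving the zero-extended field value.  */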
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
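
/* Illustrative walk-through (added commentary; the byte values are
   reconstructed from the general z/Architecture encoding, not from this
   file): for the 6-byte insn LG %r1,0(%r2), encoded e3 10 20 00 00 04,
   op is 0xe3 and ilen is 6, so the default arm extracts op2 = 0x04 from
   bit 40 of the left-aligned image and lookup_opc(0xe304) finds LG.  */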
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start(tb);

    do {
        if (search_pc) {
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;

    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}