target-arm: dump-guest-memory: add prfpreg notes for aarch64
[qemu/ar7.git] / target-s390x / translate.c
blobc79a2cb57a9e095b28aaa0c6c0441ed9e2fe3670
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t pc, next_pc;
58 enum cc_op cc_op;
59 bool singlestep_enabled;
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;
65 bool is_64;
66 bool g1;
67 bool g2;
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
74 #define DISAS_EXCP 4
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
88 return pc;
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
124 for (i = 0; i < 32; i++) {
125 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
126 env->vregs[i][0].ll, env->vregs[i][1].ll);
127 cpu_fprintf(f, (i % 2) ? "\n" : " ");
130 #ifndef CONFIG_USER_ONLY
131 for (i = 0; i < 16; i++) {
132 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
133 if ((i % 4) == 3) {
134 cpu_fprintf(f, "\n");
135 } else {
136 cpu_fprintf(f, " ");
139 #endif
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i = 0; i < CC_OP_MAX; i++) {
143 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
144 inline_branch_miss[i], inline_branch_hit[i]);
146 #endif
148 cpu_fprintf(f, "\n");
151 static TCGv_i64 psw_addr;
152 static TCGv_i64 psw_mask;
153 static TCGv_i64 gbea;
155 static TCGv_i32 cc_op;
156 static TCGv_i64 cc_src;
157 static TCGv_i64 cc_dst;
158 static TCGv_i64 cc_vr;
160 static char cpu_reg_names[32][4];
161 static TCGv_i64 regs[16];
162 static TCGv_i64 fregs[16];
164 void s390x_translate_init(void)
166 int i;
168 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
169 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
170 offsetof(CPUS390XState, psw.addr),
171 "psw_addr");
172 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
173 offsetof(CPUS390XState, psw.mask),
174 "psw_mask");
175 gbea = tcg_global_mem_new_i64(TCG_AREG0,
176 offsetof(CPUS390XState, gbea),
177 "gbea");
179 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
180 "cc_op");
181 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
182 "cc_src");
183 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
184 "cc_dst");
185 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
186 "cc_vr");
188 for (i = 0; i < 16; i++) {
189 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
190 regs[i] = tcg_global_mem_new(TCG_AREG0,
191 offsetof(CPUS390XState, regs[i]),
192 cpu_reg_names[i]);
195 for (i = 0; i < 16; i++) {
196 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
197 fregs[i] = tcg_global_mem_new(TCG_AREG0,
198 offsetof(CPUS390XState, vregs[i][0].d),
199 cpu_reg_names[i + 16]);
203 static TCGv_i64 load_reg(int reg)
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
210 static TCGv_i64 load_freg32_i64(int reg)
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_shri_i64(r, fregs[reg], 32);
214 return r;
217 static void store_reg(int reg, TCGv_i64 v)
219 tcg_gen_mov_i64(regs[reg], v);
222 static void store_freg(int reg, TCGv_i64 v)
224 tcg_gen_mov_i64(fregs[reg], v);
227 static void store_reg32_i64(int reg, TCGv_i64 v)
229 /* 32 bit register writes keep the upper half */
230 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
233 static void store_reg32h_i64(int reg, TCGv_i64 v)
235 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
238 static void store_freg32_i64(int reg, TCGv_i64 v)
240 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
243 static void return_low128(TCGv_i64 dest)
245 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
248 static void update_psw_addr(DisasContext *s)
250 /* psw.addr */
251 tcg_gen_movi_i64(psw_addr, s->pc);
254 static void per_branch(DisasContext *s, bool to_next)
256 #ifndef CONFIG_USER_ONLY
257 tcg_gen_movi_i64(gbea, s->pc);
259 if (s->tb->flags & FLAG_MASK_PER) {
260 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
261 gen_helper_per_branch(cpu_env, gbea, next_pc);
262 if (to_next) {
263 tcg_temp_free_i64(next_pc);
266 #endif
269 static void per_branch_cond(DisasContext *s, TCGCond cond,
270 TCGv_i64 arg1, TCGv_i64 arg2)
272 #ifndef CONFIG_USER_ONLY
273 if (s->tb->flags & FLAG_MASK_PER) {
274 TCGLabel *lab = gen_new_label();
275 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
277 tcg_gen_movi_i64(gbea, s->pc);
278 gen_helper_per_branch(cpu_env, gbea, psw_addr);
280 gen_set_label(lab);
281 } else {
282 TCGv_i64 pc = tcg_const_i64(s->pc);
283 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
284 tcg_temp_free_i64(pc);
286 #endif
289 static void per_breaking_event(DisasContext *s)
291 tcg_gen_movi_i64(gbea, s->pc);
294 static void update_cc_op(DisasContext *s)
296 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
297 tcg_gen_movi_i32(cc_op, s->cc_op);
301 static void potential_page_fault(DisasContext *s)
303 update_psw_addr(s);
304 update_cc_op(s);
307 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
309 return (uint64_t)cpu_lduw_code(env, pc);
312 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
314 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
317 static int get_mem_index(DisasContext *s)
319 switch (s->tb->flags & FLAG_MASK_ASC) {
320 case PSW_ASC_PRIMARY >> 32:
321 return 0;
322 case PSW_ASC_SECONDARY >> 32:
323 return 1;
324 case PSW_ASC_HOME >> 32:
325 return 2;
326 default:
327 tcg_abort();
328 break;
332 static void gen_exception(int excp)
334 TCGv_i32 tmp = tcg_const_i32(excp);
335 gen_helper_exception(cpu_env, tmp);
336 tcg_temp_free_i32(tmp);
339 static void gen_program_exception(DisasContext *s, int code)
341 TCGv_i32 tmp;
343 /* Remember what pgm exeption this was. */
344 tmp = tcg_const_i32(code);
345 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
346 tcg_temp_free_i32(tmp);
348 tmp = tcg_const_i32(s->next_pc - s->pc);
349 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
350 tcg_temp_free_i32(tmp);
352 /* Advance past instruction. */
353 s->pc = s->next_pc;
354 update_psw_addr(s);
356 /* Save off cc. */
357 update_cc_op(s);
359 /* Trigger exception. */
360 gen_exception(EXCP_PGM);
363 static inline void gen_illegal_opcode(DisasContext *s)
365 gen_program_exception(s, PGM_OPERATION);
368 static inline void gen_trap(DisasContext *s)
370 TCGv_i32 t;
372 /* Set DXC to 0xff. */
373 t = tcg_temp_new_i32();
374 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
375 tcg_gen_ori_i32(t, t, 0xff00);
376 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
377 tcg_temp_free_i32(t);
379 gen_program_exception(s, PGM_DATA);
382 #ifndef CONFIG_USER_ONLY
383 static void check_privileged(DisasContext *s)
385 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
386 gen_program_exception(s, PGM_PRIVILEGED);
389 #endif
391 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
393 TCGv_i64 tmp = tcg_temp_new_i64();
394 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
396 /* Note that d2 is limited to 20 bits, signed. If we crop negative
397 displacements early we create larger immedate addends. */
399 /* Note that addi optimizes the imm==0 case. */
400 if (b2 && x2) {
401 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
402 tcg_gen_addi_i64(tmp, tmp, d2);
403 } else if (b2) {
404 tcg_gen_addi_i64(tmp, regs[b2], d2);
405 } else if (x2) {
406 tcg_gen_addi_i64(tmp, regs[x2], d2);
407 } else {
408 if (need_31) {
409 d2 &= 0x7fffffff;
410 need_31 = false;
412 tcg_gen_movi_i64(tmp, d2);
414 if (need_31) {
415 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
418 return tmp;
421 static inline bool live_cc_data(DisasContext *s)
423 return (s->cc_op != CC_OP_DYNAMIC
424 && s->cc_op != CC_OP_STATIC
425 && s->cc_op > 3);
428 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
430 if (live_cc_data(s)) {
431 tcg_gen_discard_i64(cc_src);
432 tcg_gen_discard_i64(cc_dst);
433 tcg_gen_discard_i64(cc_vr);
435 s->cc_op = CC_OP_CONST0 + val;
438 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
440 if (live_cc_data(s)) {
441 tcg_gen_discard_i64(cc_src);
442 tcg_gen_discard_i64(cc_vr);
444 tcg_gen_mov_i64(cc_dst, dst);
445 s->cc_op = op;
448 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
449 TCGv_i64 dst)
451 if (live_cc_data(s)) {
452 tcg_gen_discard_i64(cc_vr);
454 tcg_gen_mov_i64(cc_src, src);
455 tcg_gen_mov_i64(cc_dst, dst);
456 s->cc_op = op;
459 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
460 TCGv_i64 dst, TCGv_i64 vr)
462 tcg_gen_mov_i64(cc_src, src);
463 tcg_gen_mov_i64(cc_dst, dst);
464 tcg_gen_mov_i64(cc_vr, vr);
465 s->cc_op = op;
468 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
470 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
473 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
475 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
478 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
480 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
483 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
485 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
488 /* CC value is in env->cc_op */
489 static void set_cc_static(DisasContext *s)
491 if (live_cc_data(s)) {
492 tcg_gen_discard_i64(cc_src);
493 tcg_gen_discard_i64(cc_dst);
494 tcg_gen_discard_i64(cc_vr);
496 s->cc_op = CC_OP_STATIC;
499 /* calculates cc into cc_op */
500 static void gen_op_calc_cc(DisasContext *s)
502 TCGv_i32 local_cc_op;
503 TCGv_i64 dummy;
505 TCGV_UNUSED_I32(local_cc_op);
506 TCGV_UNUSED_I64(dummy);
507 switch (s->cc_op) {
508 default:
509 dummy = tcg_const_i64(0);
510 /* FALLTHRU */
511 case CC_OP_ADD_64:
512 case CC_OP_ADDU_64:
513 case CC_OP_ADDC_64:
514 case CC_OP_SUB_64:
515 case CC_OP_SUBU_64:
516 case CC_OP_SUBB_64:
517 case CC_OP_ADD_32:
518 case CC_OP_ADDU_32:
519 case CC_OP_ADDC_32:
520 case CC_OP_SUB_32:
521 case CC_OP_SUBU_32:
522 case CC_OP_SUBB_32:
523 local_cc_op = tcg_const_i32(s->cc_op);
524 break;
525 case CC_OP_CONST0:
526 case CC_OP_CONST1:
527 case CC_OP_CONST2:
528 case CC_OP_CONST3:
529 case CC_OP_STATIC:
530 case CC_OP_DYNAMIC:
531 break;
534 switch (s->cc_op) {
535 case CC_OP_CONST0:
536 case CC_OP_CONST1:
537 case CC_OP_CONST2:
538 case CC_OP_CONST3:
539 /* s->cc_op is the cc value */
540 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
541 break;
542 case CC_OP_STATIC:
543 /* env->cc_op already is the cc value */
544 break;
545 case CC_OP_NZ:
546 case CC_OP_ABS_64:
547 case CC_OP_NABS_64:
548 case CC_OP_ABS_32:
549 case CC_OP_NABS_32:
550 case CC_OP_LTGT0_32:
551 case CC_OP_LTGT0_64:
552 case CC_OP_COMP_32:
553 case CC_OP_COMP_64:
554 case CC_OP_NZ_F32:
555 case CC_OP_NZ_F64:
556 case CC_OP_FLOGR:
557 /* 1 argument */
558 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
559 break;
560 case CC_OP_ICM:
561 case CC_OP_LTGT_32:
562 case CC_OP_LTGT_64:
563 case CC_OP_LTUGTU_32:
564 case CC_OP_LTUGTU_64:
565 case CC_OP_TM_32:
566 case CC_OP_TM_64:
567 case CC_OP_SLA_32:
568 case CC_OP_SLA_64:
569 case CC_OP_NZ_F128:
570 /* 2 arguments */
571 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
572 break;
573 case CC_OP_ADD_64:
574 case CC_OP_ADDU_64:
575 case CC_OP_ADDC_64:
576 case CC_OP_SUB_64:
577 case CC_OP_SUBU_64:
578 case CC_OP_SUBB_64:
579 case CC_OP_ADD_32:
580 case CC_OP_ADDU_32:
581 case CC_OP_ADDC_32:
582 case CC_OP_SUB_32:
583 case CC_OP_SUBU_32:
584 case CC_OP_SUBB_32:
585 /* 3 arguments */
586 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
587 break;
588 case CC_OP_DYNAMIC:
589 /* unknown operation - assume 3 arguments and cc_op in env */
590 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
591 break;
592 default:
593 tcg_abort();
596 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
597 tcg_temp_free_i32(local_cc_op);
599 if (!TCGV_IS_UNUSED_I64(dummy)) {
600 tcg_temp_free_i64(dummy);
603 /* We now have cc in cc_op as constant */
604 set_cc_static(s);
607 static int use_goto_tb(DisasContext *s, uint64_t dest)
609 /* NOTE: we handle the case where the TB spans two pages here */
610 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
611 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
612 && !s->singlestep_enabled
613 && !(s->tb->cflags & CF_LAST_IO)
614 && !(s->tb->flags & FLAG_MASK_PER));
617 static void account_noninline_branch(DisasContext *s, int cc_op)
619 #ifdef DEBUG_INLINE_BRANCHES
620 inline_branch_miss[cc_op]++;
621 #endif
624 static void account_inline_branch(DisasContext *s, int cc_op)
626 #ifdef DEBUG_INLINE_BRANCHES
627 inline_branch_hit[cc_op]++;
628 #endif
631 /* Table of mask values to comparison codes, given a comparison as input.
632 For such, CC=3 should not be possible. */
633 static const TCGCond ltgt_cond[16] = {
634 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
635 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
636 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
637 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
638 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
639 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
640 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
641 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
644 /* Table of mask values to comparison codes, given a logic op as input.
645 For such, only CC=0 and CC=1 should be possible. */
646 static const TCGCond nz_cond[16] = {
647 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
648 TCG_COND_NEVER, TCG_COND_NEVER,
649 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
650 TCG_COND_NE, TCG_COND_NE,
651 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
652 TCG_COND_EQ, TCG_COND_EQ,
653 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
654 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
657 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
658 details required to generate a TCG comparison. */
659 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
661 TCGCond cond;
662 enum cc_op old_cc_op = s->cc_op;
664 if (mask == 15 || mask == 0) {
665 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
666 c->u.s32.a = cc_op;
667 c->u.s32.b = cc_op;
668 c->g1 = c->g2 = true;
669 c->is_64 = false;
670 return;
673 /* Find the TCG condition for the mask + cc op. */
674 switch (old_cc_op) {
675 case CC_OP_LTGT0_32:
676 case CC_OP_LTGT0_64:
677 case CC_OP_LTGT_32:
678 case CC_OP_LTGT_64:
679 cond = ltgt_cond[mask];
680 if (cond == TCG_COND_NEVER) {
681 goto do_dynamic;
683 account_inline_branch(s, old_cc_op);
684 break;
686 case CC_OP_LTUGTU_32:
687 case CC_OP_LTUGTU_64:
688 cond = tcg_unsigned_cond(ltgt_cond[mask]);
689 if (cond == TCG_COND_NEVER) {
690 goto do_dynamic;
692 account_inline_branch(s, old_cc_op);
693 break;
695 case CC_OP_NZ:
696 cond = nz_cond[mask];
697 if (cond == TCG_COND_NEVER) {
698 goto do_dynamic;
700 account_inline_branch(s, old_cc_op);
701 break;
703 case CC_OP_TM_32:
704 case CC_OP_TM_64:
705 switch (mask) {
706 case 8:
707 cond = TCG_COND_EQ;
708 break;
709 case 4 | 2 | 1:
710 cond = TCG_COND_NE;
711 break;
712 default:
713 goto do_dynamic;
715 account_inline_branch(s, old_cc_op);
716 break;
718 case CC_OP_ICM:
719 switch (mask) {
720 case 8:
721 cond = TCG_COND_EQ;
722 break;
723 case 4 | 2 | 1:
724 case 4 | 2:
725 cond = TCG_COND_NE;
726 break;
727 default:
728 goto do_dynamic;
730 account_inline_branch(s, old_cc_op);
731 break;
733 case CC_OP_FLOGR:
734 switch (mask & 0xa) {
735 case 8: /* src == 0 -> no one bit found */
736 cond = TCG_COND_EQ;
737 break;
738 case 2: /* src != 0 -> one bit found */
739 cond = TCG_COND_NE;
740 break;
741 default:
742 goto do_dynamic;
744 account_inline_branch(s, old_cc_op);
745 break;
747 case CC_OP_ADDU_32:
748 case CC_OP_ADDU_64:
749 switch (mask) {
750 case 8 | 2: /* vr == 0 */
751 cond = TCG_COND_EQ;
752 break;
753 case 4 | 1: /* vr != 0 */
754 cond = TCG_COND_NE;
755 break;
756 case 8 | 4: /* no carry -> vr >= src */
757 cond = TCG_COND_GEU;
758 break;
759 case 2 | 1: /* carry -> vr < src */
760 cond = TCG_COND_LTU;
761 break;
762 default:
763 goto do_dynamic;
765 account_inline_branch(s, old_cc_op);
766 break;
768 case CC_OP_SUBU_32:
769 case CC_OP_SUBU_64:
770 /* Note that CC=0 is impossible; treat it as dont-care. */
771 switch (mask & 7) {
772 case 2: /* zero -> op1 == op2 */
773 cond = TCG_COND_EQ;
774 break;
775 case 4 | 1: /* !zero -> op1 != op2 */
776 cond = TCG_COND_NE;
777 break;
778 case 4: /* borrow (!carry) -> op1 < op2 */
779 cond = TCG_COND_LTU;
780 break;
781 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
782 cond = TCG_COND_GEU;
783 break;
784 default:
785 goto do_dynamic;
787 account_inline_branch(s, old_cc_op);
788 break;
790 default:
791 do_dynamic:
792 /* Calculate cc value. */
793 gen_op_calc_cc(s);
794 /* FALLTHRU */
796 case CC_OP_STATIC:
797 /* Jump based on CC. We'll load up the real cond below;
798 the assignment here merely avoids a compiler warning. */
799 account_noninline_branch(s, old_cc_op);
800 old_cc_op = CC_OP_STATIC;
801 cond = TCG_COND_NEVER;
802 break;
805 /* Load up the arguments of the comparison. */
806 c->is_64 = true;
807 c->g1 = c->g2 = false;
808 switch (old_cc_op) {
809 case CC_OP_LTGT0_32:
810 c->is_64 = false;
811 c->u.s32.a = tcg_temp_new_i32();
812 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
813 c->u.s32.b = tcg_const_i32(0);
814 break;
815 case CC_OP_LTGT_32:
816 case CC_OP_LTUGTU_32:
817 case CC_OP_SUBU_32:
818 c->is_64 = false;
819 c->u.s32.a = tcg_temp_new_i32();
820 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
821 c->u.s32.b = tcg_temp_new_i32();
822 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
823 break;
825 case CC_OP_LTGT0_64:
826 case CC_OP_NZ:
827 case CC_OP_FLOGR:
828 c->u.s64.a = cc_dst;
829 c->u.s64.b = tcg_const_i64(0);
830 c->g1 = true;
831 break;
832 case CC_OP_LTGT_64:
833 case CC_OP_LTUGTU_64:
834 case CC_OP_SUBU_64:
835 c->u.s64.a = cc_src;
836 c->u.s64.b = cc_dst;
837 c->g1 = c->g2 = true;
838 break;
840 case CC_OP_TM_32:
841 case CC_OP_TM_64:
842 case CC_OP_ICM:
843 c->u.s64.a = tcg_temp_new_i64();
844 c->u.s64.b = tcg_const_i64(0);
845 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
846 break;
848 case CC_OP_ADDU_32:
849 c->is_64 = false;
850 c->u.s32.a = tcg_temp_new_i32();
851 c->u.s32.b = tcg_temp_new_i32();
852 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
853 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
854 tcg_gen_movi_i32(c->u.s32.b, 0);
855 } else {
856 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
858 break;
860 case CC_OP_ADDU_64:
861 c->u.s64.a = cc_vr;
862 c->g1 = true;
863 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
864 c->u.s64.b = tcg_const_i64(0);
865 } else {
866 c->u.s64.b = cc_src;
867 c->g2 = true;
869 break;
871 case CC_OP_STATIC:
872 c->is_64 = false;
873 c->u.s32.a = cc_op;
874 c->g1 = true;
875 switch (mask) {
876 case 0x8 | 0x4 | 0x2: /* cc != 3 */
877 cond = TCG_COND_NE;
878 c->u.s32.b = tcg_const_i32(3);
879 break;
880 case 0x8 | 0x4 | 0x1: /* cc != 2 */
881 cond = TCG_COND_NE;
882 c->u.s32.b = tcg_const_i32(2);
883 break;
884 case 0x8 | 0x2 | 0x1: /* cc != 1 */
885 cond = TCG_COND_NE;
886 c->u.s32.b = tcg_const_i32(1);
887 break;
888 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
889 cond = TCG_COND_EQ;
890 c->g1 = false;
891 c->u.s32.a = tcg_temp_new_i32();
892 c->u.s32.b = tcg_const_i32(0);
893 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
894 break;
895 case 0x8 | 0x4: /* cc < 2 */
896 cond = TCG_COND_LTU;
897 c->u.s32.b = tcg_const_i32(2);
898 break;
899 case 0x8: /* cc == 0 */
900 cond = TCG_COND_EQ;
901 c->u.s32.b = tcg_const_i32(0);
902 break;
903 case 0x4 | 0x2 | 0x1: /* cc != 0 */
904 cond = TCG_COND_NE;
905 c->u.s32.b = tcg_const_i32(0);
906 break;
907 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
908 cond = TCG_COND_NE;
909 c->g1 = false;
910 c->u.s32.a = tcg_temp_new_i32();
911 c->u.s32.b = tcg_const_i32(0);
912 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
913 break;
914 case 0x4: /* cc == 1 */
915 cond = TCG_COND_EQ;
916 c->u.s32.b = tcg_const_i32(1);
917 break;
918 case 0x2 | 0x1: /* cc > 1 */
919 cond = TCG_COND_GTU;
920 c->u.s32.b = tcg_const_i32(1);
921 break;
922 case 0x2: /* cc == 2 */
923 cond = TCG_COND_EQ;
924 c->u.s32.b = tcg_const_i32(2);
925 break;
926 case 0x1: /* cc == 3 */
927 cond = TCG_COND_EQ;
928 c->u.s32.b = tcg_const_i32(3);
929 break;
930 default:
931 /* CC is masked by something else: (8 >> cc) & mask. */
932 cond = TCG_COND_NE;
933 c->g1 = false;
934 c->u.s32.a = tcg_const_i32(8);
935 c->u.s32.b = tcg_const_i32(0);
936 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
937 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
938 break;
940 break;
942 default:
943 abort();
945 c->cond = cond;
948 static void free_compare(DisasCompare *c)
950 if (!c->g1) {
951 if (c->is_64) {
952 tcg_temp_free_i64(c->u.s64.a);
953 } else {
954 tcg_temp_free_i32(c->u.s32.a);
957 if (!c->g2) {
958 if (c->is_64) {
959 tcg_temp_free_i64(c->u.s64.b);
960 } else {
961 tcg_temp_free_i32(c->u.s32.b);
966 /* ====================================================================== */
967 /* Define the insn format enumeration. */
968 #define F0(N) FMT_##N,
969 #define F1(N, X1) F0(N)
970 #define F2(N, X1, X2) F0(N)
971 #define F3(N, X1, X2, X3) F0(N)
972 #define F4(N, X1, X2, X3, X4) F0(N)
973 #define F5(N, X1, X2, X3, X4, X5) F0(N)
975 typedef enum {
976 #include "insn-format.def"
977 } DisasFormat;
979 #undef F0
980 #undef F1
981 #undef F2
982 #undef F3
983 #undef F4
984 #undef F5
986 /* Define a structure to hold the decoded fields. We'll store each inside
987 an array indexed by an enum. In order to conserve memory, we'll arrange
988 for fields that do not exist at the same time to overlap, thus the "C"
989 for compact. For checking purposes there is an "O" for original index
990 as well that will be applied to availability bitmaps. */
/* "Original" field indices, used as bit positions in availability
   bitmaps (DisasFields.presentO). */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* "Compact" field indices: fields that never coexist in one format
   share a slot in DisasFields.c[], keeping that array small. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1046 struct DisasFields {
1047 uint64_t raw_insn;
1048 unsigned op:8;
1049 unsigned op2:8;
1050 unsigned presentC:16;
1051 unsigned int presentO;
1052 int c[NUM_C_FIELD];
1055 /* This is the way fields are to be accessed out of DisasFields. */
1056 #define have_field(S, F) have_field1((S), FLD_O_##F)
1057 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1059 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1061 return (f->presentO >> c) & 1;
1064 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1065 enum DisasFieldIndexC c)
1067 assert(have_field1(f, o));
1068 return f->c[c];
1071 /* Describe the layout of each field in each format. */
1072 typedef struct DisasField {
1073 unsigned int beg:8;
1074 unsigned int size:8;
1075 unsigned int type:2;
1076 unsigned int indexC:6;
1077 enum DisasFieldIndexO indexO:8;
1078 } DisasField;
1080 typedef struct DisasFormatInfo {
1081 DisasField op[NUM_C_FIELD];
1082 } DisasFormatInfo;
1084 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1085 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1086 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1087 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1088 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1089 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1090 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1091 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1092 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1093 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1094 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1095 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1096 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1097 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1099 #define F0(N) { { } },
1100 #define F1(N, X1) { { X1 } },
1101 #define F2(N, X1, X2) { { X1, X2 } },
1102 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1103 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1104 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1106 static const DisasFormatInfo format_info[] = {
1107 #include "insn-format.def"
1110 #undef F0
1111 #undef F1
1112 #undef F2
1113 #undef F3
1114 #undef F4
1115 #undef F5
1116 #undef R
1117 #undef M
1118 #undef BD
1119 #undef BXD
1120 #undef BDL
1121 #undef BXDL
1122 #undef I
1123 #undef L
1125 /* Generally, we'll extract operands into this structures, operate upon
1126 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1127 of routines below for more details. */
1128 typedef struct {
1129 bool g_out, g_out2, g_in1, g_in2;
1130 TCGv_i64 out, out2, in1, in2;
1131 TCGv_i64 addr1;
1132 } DisasOps;
1134 /* Instructions can place constraints on their operands, raising specification
1135 exceptions if they are violated. To make this easy to automate, each "in1",
1136 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1137 of the following, or 0. To make this easy to document, we'll put the
1138 SPEC_<name> defines next to <name>. */
1140 #define SPEC_r1_even 1
1141 #define SPEC_r2_even 2
1142 #define SPEC_r3_even 4
1143 #define SPEC_r1_f128 8
1144 #define SPEC_r2_f128 16
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
/* Architectural facility an instruction belongs to; used to reject
   insns the configured CPU model does not implement. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception sumilation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
1190 struct DisasInsn {
1191 unsigned opc:16;
1192 DisasFormat fmt:8;
1193 DisasFacility fac:8;
1194 unsigned spec:8;
1196 const char *name;
1198 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1199 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1200 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1201 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1202 void (*help_cout)(DisasContext *, DisasOps *);
1203 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1205 uint64_t data;
1208 /* ====================================================================== */
1209 /* Miscellaneous helpers, used by several operations. */
1211 static void help_l2_shift(DisasContext *s, DisasFields *f,
1212 DisasOps *o, int mask)
1214 int b2 = get_field(f, b2);
1215 int d2 = get_field(f, d2);
1217 if (b2 == 0) {
1218 o->in2 = tcg_const_i64(d2 & mask);
1219 } else {
1220 o->in2 = get_address(s, 0, b2, d2);
1221 tcg_gen_andi_i64(o->in2, o->in2, mask);
1225 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1227 if (dest == s->next_pc) {
1228 per_branch(s, true);
1229 return NO_EXIT;
1231 if (use_goto_tb(s, dest)) {
1232 update_cc_op(s);
1233 per_breaking_event(s);
1234 tcg_gen_goto_tb(0);
1235 tcg_gen_movi_i64(psw_addr, dest);
1236 tcg_gen_exit_tb((uintptr_t)s->tb);
1237 return EXIT_GOTO_TB;
1238 } else {
1239 tcg_gen_movi_i64(psw_addr, dest);
1240 per_branch(s, false);
1241 return EXIT_PC_UPDATED;
/* Emit a conditional branch.  C describes the condition; the target is
   either PC-relative (is_imm, offset IMM in halfwords) or the register/
   computed address CDEST.  Handles the degenerate never/always cases,
   then picks the best exit strategy based on which of the two exits may
   use goto_tb chaining.  Always consumes (frees) *c. */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken: psw_addr already holds cdest for the
               register case; for the immediate case set it here. */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select between the two destinations. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1376 /* ====================================================================== */
1377 /* The operations. These perform the bulk of the work for any insn,
1378 usually after the operands have been loaded and output initialized. */
1380 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1382 TCGv_i64 z, n;
1383 z = tcg_const_i64(0);
1384 n = tcg_temp_new_i64();
1385 tcg_gen_neg_i64(n, o->in2);
1386 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1387 tcg_temp_free_i64(n);
1388 tcg_temp_free_i64(z);
1389 return NO_EXIT;
/* Float absolute value (32-bit): clear the sign bit. */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Float absolute value (64-bit): clear the sign bit. */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Float absolute value (128-bit): clear the sign bit of the high
   doubleword, pass the low doubleword through unchanged. */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* Integer addition; CC is computed separately by the cout helper. */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Add with carry: out = in1 + in2 + carry-in, where the incoming carry
   is extracted from the current condition code. */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire. */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* BFP add, short format, via helper. */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP add, long format, via helper. */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP add, extended (128-bit) format; low half returned via
   return_low128. */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Bitwise AND; CC is computed separately by the cout helper. */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND-immediate into a sub-field of the register: insn->data encodes the
   field position (low byte = shift) and width (high bits = size).  Bits
   outside the field are preserved by OR-ing ones into the mask's
   complement before the AND. */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Branch and save: store the link information, then branch to the
   address in in2 unless it is unset (branch to register 0 = no branch). */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
/* Branch relative and save: store link info, branch PC-relative. */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* Branch on condition: build the comparison from mask m1 and branch
   either PC-relative (i2 present) or to the address in in2. */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on count (32-bit): decrement the low word of r1 and branch
   if the result is non-zero. */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on count high: decrement the HIGH word of r1 and branch if
   the result is non-zero (always immediate form). */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* Branch on count (64-bit): decrement r1 in place and branch if the
   result is non-zero.  r1 is a global (g1 = true), so it must not be
   freed by free_compare. */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on index (32-bit): r1 += r3, then compare the new low word of
   r1 against the odd register of the r3 pair.  insn->data selects the
   LE (branch-on-low-or-equal) vs GT variant. */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on index (64-bit): r1 += r3, compare r1 against the odd
   register of the r3 pair.  If r1 IS that odd register, snapshot its
   pre-increment value into a temp so the comparison uses the old value. */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Compare and branch: compare in1 with in2 using the relation selected
   by mask m3 (unsigned when insn->data is set), branching PC-relative
   (i4) or to a computed b4/d4 address. */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* BFP compare, short format; the helper returns the CC value. */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP compare, long format. */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP compare, extended (128-bit) format. */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* The following twelve helpers convert BFP values to signed (cf*/cg*)
   or unsigned (clf*/clg*) 32/64-bit integers, with rounding mode m3.
   Each sets the CC from the source value's class. */

/* Convert short BFP to 32-bit signed int. */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert long BFP to 32-bit signed int. */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert extended BFP to 32-bit signed int. */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert short BFP to 64-bit signed int. */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert long BFP to 64-bit signed int. */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert extended BFP to 64-bit signed int. */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert short BFP to 32-bit unsigned int. */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert long BFP to 32-bit unsigned int. */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert extended BFP to 32-bit unsigned int. */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert short BFP to 64-bit unsigned int. */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert long BFP to 64-bit unsigned int. */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert extended BFP to 64-bit unsigned int. */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* The following six helpers convert signed (c*gb) or unsigned (c*lgb)
   64-bit integers to BFP, with rounding mode m3.  No CC is set. */

/* Convert 64-bit signed int to short BFP. */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit signed int to long BFP. */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit signed int to extended BFP. */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Convert 64-bit unsigned int to short BFP. */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit unsigned int to long BFP. */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit unsigned int to extended BFP. */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Checksum: helper computes the checksum and the number of bytes
   processed; afterwards advance the r2 address register and shrink the
   r2+1 length register by that amount. */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* Compare logical (memory vs memory).  Power-of-two lengths up to 8 are
   inlined as a pair of loads plus an unsigned compare; anything else
   goes through the byte-wise helper. */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* Compare logical long extended, fully via helper; CC from helper. */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
/* Compare logical under mask: helper compares the register bytes
   selected by mask m3 against successive storage bytes. */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Compare logical string, via helper; r0 supplies the terminator.
   Updated operand addresses come back in in1 and the low128 slot. */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1924 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1926 TCGv_i64 t = tcg_temp_new_i64();
1927 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1928 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1929 tcg_gen_or_i64(o->out, o->out, t);
1930 tcg_temp_free_i64(t);
1931 return NO_EXIT;
/* Compare and swap (32/64-bit, selected by insn->data).  Non-atomic
   load/compare/store sequence — see FIXME below. */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
/* Compare double and swap (128-bit, as two 64-bit halves).  Non-atomic
   — see FIXME below. */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* On equality store the new value, otherwise re-store the old one
       (the store itself is issued unconditionally). */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions. */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* Compare and swap and purge (privileged), via helper. */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
/* Convert to decimal: helper converts the low 32 bits of in1 to packed
   decimal, which is then stored to the address in in2. */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* Compare and trap: raise a trap when in1 relates to in2 per mask m3
   (unsigned comparison when insn->data is set); the brcond skips the
   trap when the inverted condition holds. */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): sync PSW address and CC, then defer to the
   helper with the function code from i2. */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* Division helpers; each returns quotient/remainder as a pair (the
   second half via return_low128). */

/* 32-bit signed divide. */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit unsigned divide. */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 64-bit signed divide. */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128-by-64-bit unsigned divide (dividend in out:out2). */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* BFP divide, short format. */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP divide, long format. */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP divide, extended (128-bit) format. */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Extract access register r2 into the output. */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
/* Extract cache attribute: this implementation reports no cache
   information (all ones). */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
/* Extract the floating-point control register into the output. */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* Extract PSW: r1 receives the high word of psw_mask, r2 (if non-zero)
   the low word.  Writes happen immediately rather than through the
   output hook -- see the comment below. */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: sync PSW and CC, then let the helper run the modified target
   instruction. */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
/* Load FP integer (round to integral value), rounding mode m3. */

/* Short format. */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Long format. */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Extended (128-bit) format. */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Find leftmost one: R1 = bit position (or 64 if none), R1+1 = input
   with that bit cleared. */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2254 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2256 int m3 = get_field(s->fields, m3);
2257 int pos, len, base = s->insn->data;
2258 TCGv_i64 tmp = tcg_temp_new_i64();
2259 uint64_t ccm;
2261 switch (m3) {
2262 case 0xf:
2263 /* Effectively a 32-bit load. */
2264 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2265 len = 32;
2266 goto one_insert;
2268 case 0xc:
2269 case 0x6:
2270 case 0x3:
2271 /* Effectively a 16-bit load. */
2272 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2273 len = 16;
2274 goto one_insert;
2276 case 0x8:
2277 case 0x4:
2278 case 0x2:
2279 case 0x1:
2280 /* Effectively an 8-bit load. */
2281 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2282 len = 8;
2283 goto one_insert;
2285 one_insert:
2286 pos = base + ctz32(m3) * 8;
2287 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2288 ccm = ((1ull << len) - 1) << pos;
2289 break;
2291 default:
2292 /* This is going to be a sequence of loads and inserts. */
2293 pos = base + 32 - 8;
2294 ccm = 0;
2295 while (m3) {
2296 if (m3 & 0x8) {
2297 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2298 tcg_gen_addi_i64(o->in2, o->in2, 1);
2299 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2300 ccm |= 0xff << pos;
2302 m3 = (m3 << 1) & 0xf;
2303 pos -= 8;
2305 break;
2308 tcg_gen_movi_i64(tmp, ccm);
2309 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2310 tcg_temp_free_i64(tmp);
2311 return NO_EXIT;
/* Insert immediate: deposit in2 into in1 at the field described by
   insn->data (low byte = shift, high bits = size). */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* Insert program mask: build byte 3 of the output from the program
   mask bits of psw_mask and the current condition code. */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Program-mask bits from the PSW into bits 24..27. */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Condition code into bits 28..29. */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* Invalidate page table entry (privileged), via helper. */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Insert storage key extended (privileged), via helper. */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* BFP format-conversion loads (lengthen/round between short, long and
   extended formats), all via helpers. */

/* Short -> long. */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Long -> short (rounded). */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Extended -> long (rounded). */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Extended -> short (rounded). */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Long -> extended. */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Short -> extended. */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Load logical 31-bit: keep only the low 31 bits of in2. */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
/* Memory loads of each width/signedness from the address in in2. */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Load-and-trap family: store/compute the result unconditionally, then
   trap if the loaded value is zero.  The common shape is: write result,
   branch past the trap when value != 0. */
2443 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2445     TCGLabel *lab = gen_new_label();
2446     store_reg32_i64(get_field(s->fields, r1), o->in2);
2447     /* The value is stored even in case of trap. */
2448     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2449     gen_trap(s);
2450     gen_set_label(lab);
2451     return NO_EXIT;
/* 64-bit variant: load from memory first, then test. */
2454 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2456     TCGLabel *lab = gen_new_label();
2457     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2458     /* The value is stored even in case of trap. */
2459     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2460     gen_trap(s);
2461     gen_set_label(lab);
2462     return NO_EXIT;
/* High-word variant: result goes into the high half of r1. */
2465 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2467     TCGLabel *lab = gen_new_label();
2468     store_reg32h_i64(get_field(s->fields, r1), o->in2);
2469     /* The value is stored even in case of trap. */
2470     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2471     gen_trap(s);
2472     gen_set_label(lab);
2473     return NO_EXIT;
/* Zero-extending 32-bit load variant. */
2476 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2478     TCGLabel *lab = gen_new_label();
2479     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2480     /* The value is stored even in case of trap. */
2481     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2482     gen_trap(s);
2483     gen_set_label(lab);
2484     return NO_EXIT;
/* 31-bit (low 31 bits kept) variant, cf. op_llgt above. */
2487 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2489     TCGLabel *lab = gen_new_label();
2490     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2491     /* The value is stored even in case of trap. */
2492     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2493     gen_trap(s);
2494     gen_set_label(lab);
2495     return NO_EXIT;
/* Load-on-condition: out = (cond per mask m3) ? in2 : in1, implemented
   branch-free with movcond.  The 32-bit compare path first materializes
   the condition as a 0/1 value, widens it, and selects against zero. */
2498 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2500     DisasCompare c;
2502     disas_jcc(s, &c, get_field(s->fields, m3));
2504     if (c.is_64) {
2505         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2506                             o->in2, o->in1);
2507         free_compare(&c);
2508     } else {
2509         TCGv_i32 t32 = tcg_temp_new_i32();
2510         TCGv_i64 t, z;
2512         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2513         free_compare(&c);
2515         t = tcg_temp_new_i64();
2516         tcg_gen_extu_i32_i64(t, t32);
2517         tcg_temp_free_i32(t32);
2519         z = tcg_const_i64(0);
2520         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2521         tcg_temp_free_i64(t);
2522         tcg_temp_free_i64(z);
2525     return NO_EXIT;
2528 #ifndef CONFIG_USER_ONLY
/* LCTL: load control registers r1..r3 from memory at in2 (32-bit each);
   privileged, may fault, so the helper does the actual loads. */
2529 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2531     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2532     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2533     check_privileged(s);
2534     potential_page_fault(s);
2535     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2536     tcg_temp_free_i32(r1);
2537     tcg_temp_free_i32(r3);
2538     return NO_EXIT;
/* LCTLG: 64-bit variant of the above. */
2541 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2543     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2544     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2545     check_privileged(s);
2546     potential_page_fault(s);
2547     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2548     tcg_temp_free_i32(r1);
2549     tcg_temp_free_i32(r3);
2550     return NO_EXIT;
/* LRA (Load Real Address): helper does the translation and sets CC. */
2552 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2554     check_privileged(s);
2555     potential_page_fault(s);
2556     gen_helper_lra(o->out, cpu_env, o->in2);
2557     set_cc_static(s);
2558     return NO_EXIT;
/* LPSW: load a short (ESA) 8-byte PSW: two 32-bit halves; the 32-bit mask
   is shifted into the upper half to form the 64-bit PSW mask.  Loading a
   PSW replaces the execution context, hence EXIT_NORETURN. */
2561 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2563     TCGv_i64 t1, t2;
2565     check_privileged(s);
2566     per_breaking_event(s);
2568     t1 = tcg_temp_new_i64();
2569     t2 = tcg_temp_new_i64();
2570     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2571     tcg_gen_addi_i64(o->in2, o->in2, 4);
2572     tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2573     /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2574     tcg_gen_shli_i64(t1, t1, 32);
2575     gen_helper_load_psw(cpu_env, t1, t2);
2576     tcg_temp_free_i64(t1);
2577     tcg_temp_free_i64(t2);
2578     return EXIT_NORETURN;
/* LPSWE: load an extended (z) 16-byte PSW: two 64-bit doublewords. */
2581 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2583     TCGv_i64 t1, t2;
2585     check_privileged(s);
2586     per_breaking_event(s);
2588     t1 = tcg_temp_new_i64();
2589     t2 = tcg_temp_new_i64();
2590     tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2591     tcg_gen_addi_i64(o->in2, o->in2, 8);
2592     tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2593     gen_helper_load_psw(cpu_env, t1, t2);
2594     tcg_temp_free_i64(t1);
2595     tcg_temp_free_i64(t2);
2596     return EXIT_NORETURN;
2598 #endif
/* LAM: load access registers r1..r3 from memory; done in a helper. */
2600 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2602     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2603     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2604     potential_page_fault(s);
2605     gen_helper_lam(cpu_env, r1, o->in2, r3);
2606     tcg_temp_free_i32(r1);
2607     tcg_temp_free_i32(r3);
2608     return NO_EXIT;
/* LM (32-bit load multiple), inlined.  Strategy: touch the first and last
   words first so any page fault happens before any register is clobbered
   beyond those two; the remaining loads then cannot fault.  Register
   numbers wrap modulo 16 (the "& 15" arithmetic throughout). */
2611 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2613     int r1 = get_field(s->fields, r1);
2614     int r3 = get_field(s->fields, r3);
2615     TCGv_i64 t1, t2;
2617     /* Only one register to read. */
2618     t1 = tcg_temp_new_i64();
2619     if (unlikely(r1 == r3)) {
2620         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2621         store_reg32_i64(r1, t1);
2622         tcg_temp_free(t1);
2623         return NO_EXIT;
2626     /* First load the values of the first and last registers to trigger
2627        possible page faults. */
2628     t2 = tcg_temp_new_i64();
2629     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2630     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2631     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2632     store_reg32_i64(r1, t1);
2633     store_reg32_i64(r3, t2);
2635     /* Only two registers to read. */
2636     if (((r1 + 1) & 15) == r3) {
2637         tcg_temp_free(t2);
2638         tcg_temp_free(t1);
2639         return NO_EXIT;
2642     /* Then load the remaining registers. Page fault can't occur. */
2643     r3 = (r3 - 1) & 15;
2644     tcg_gen_movi_i64(t2, 4);
2645     while (r1 != r3) {
2646         r1 = (r1 + 1) & 15;
2647         tcg_gen_add_i64(o->in2, o->in2, t2);
2648         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2649         store_reg32_i64(r1, t1);
2651     tcg_temp_free(t2);
2652     tcg_temp_free(t1);
2654     return NO_EXIT;
/* LMH: same algorithm as op_lm32 but results go to the high halves of the
   registers (store_reg32h_i64). */
2657 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2659     int r1 = get_field(s->fields, r1);
2660     int r3 = get_field(s->fields, r3);
2661     TCGv_i64 t1, t2;
2663     /* Only one register to read. */
2664     t1 = tcg_temp_new_i64();
2665     if (unlikely(r1 == r3)) {
2666         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2667         store_reg32h_i64(r1, t1);
2668         tcg_temp_free(t1);
2669         return NO_EXIT;
2672     /* First load the values of the first and last registers to trigger
2673        possible page faults. */
2674     t2 = tcg_temp_new_i64();
2675     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2676     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2677     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2678     store_reg32h_i64(r1, t1);
2679     store_reg32h_i64(r3, t2);
2681     /* Only two registers to read. */
2682     if (((r1 + 1) & 15) == r3) {
2683         tcg_temp_free(t2);
2684         tcg_temp_free(t1);
2685         return NO_EXIT;
2688     /* Then load the remaining registers. Page fault can't occur. */
2689     r3 = (r3 - 1) & 15;
2690     tcg_gen_movi_i64(t2, 4);
2691     while (r1 != r3) {
2692         r1 = (r1 + 1) & 15;
2693         tcg_gen_add_i64(o->in2, o->in2, t2);
2694         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2695         store_reg32h_i64(r1, t1);
2697     tcg_temp_free(t2);
2698     tcg_temp_free(t1);
2700     return NO_EXIT;
/* LMG: 64-bit load multiple, same fault-first strategy with 8-byte steps;
   full registers can be loaded directly into regs[]. */
2703 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2705     int r1 = get_field(s->fields, r1);
2706     int r3 = get_field(s->fields, r3);
2707     TCGv_i64 t1, t2;
2709     /* Only one register to read. */
2710     if (unlikely(r1 == r3)) {
2711         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2712         return NO_EXIT;
2715     /* First load the values of the first and last registers to trigger
2716        possible page faults. */
2717     t1 = tcg_temp_new_i64();
2718     t2 = tcg_temp_new_i64();
2719     tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2720     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2721     tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2722     tcg_gen_mov_i64(regs[r1], t1);
2723     tcg_temp_free(t2);
2725     /* Only two registers to read. */
2726     if (((r1 + 1) & 15) == r3) {
2727         tcg_temp_free(t1);
2728         return NO_EXIT;
2731     /* Then load the remaining registers. Page fault can't occur. */
2732     r3 = (r3 - 1) & 15;
2733     tcg_gen_movi_i64(t1, 8);
2734     while (r1 != r3) {
2735         r1 = (r1 + 1) & 15;
2736         tcg_gen_add_i64(o->in2, o->in2, t1);
2737         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2739     tcg_temp_free(t1);
2741     return NO_EXIT;
2744 #ifndef CONFIG_USER_ONLY
/* LURA / LURAG: load using real address (32/64-bit); privileged, may
   fault, so delegated to helpers. */
2745 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2747     check_privileged(s);
2748     potential_page_fault(s);
2749     gen_helper_lura(o->out, cpu_env, o->in2);
2750     return NO_EXIT;
2753 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2755     check_privileged(s);
2756     potential_page_fault(s);
2757     gen_helper_lurag(o->out, cpu_env, o->in2);
2758     return NO_EXIT;
2760 #endif
/* Register move by ownership transfer: steal in2 as the output (no copy),
   marking in2 unused so the generic cleanup does not free it twice. */
2762 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2764     o->out = o->in2;
2765     o->g_out = o->g_in2;
2766     TCGV_UNUSED_I64(o->in2);
2767     o->g_in2 = false;
2768     return NO_EXIT;
/* MVCDK/MVCSK-style move that additionally sets access register 1
   according to the current PSW address-space control (ASC) mode. */
2771 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2773     int b2 = get_field(s->fields, b2);
2774     TCGv ar1 = tcg_temp_new_i64();
2776     o->out = o->in2;
2777     o->g_out = o->g_in2;
2778     TCGV_UNUSED_I64(o->in2);
2779     o->g_in2 = false;
2781     switch (s->tb->flags & FLAG_MASK_ASC) {
2782     case PSW_ASC_PRIMARY >> 32:
2783         tcg_gen_movi_i64(ar1, 0);
2784         break;
2785     case PSW_ASC_ACCREG >> 32:
2786         tcg_gen_movi_i64(ar1, 1);
2787         break;
2788     case PSW_ASC_SECONDARY >> 32:
2789         if (b2) {
             /* In secondary mode, copy the access register of base b2. */
2790             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2791         } else {
2792             tcg_gen_movi_i64(ar1, 0);
2794         break;
2795     case PSW_ASC_HOME >> 32:
2796         tcg_gen_movi_i64(ar1, 2);
2797         break;
2800     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2801     tcg_temp_free_i64(ar1);
2803     return NO_EXIT;
/* 128-bit move: transfer ownership of both halves (cf. op_mov2). */
2806 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2808     o->out = o->in1;
2809     o->out2 = o->in2;
2810     o->g_out = o->g_in1;
2811     o->g_out2 = o->g_in2;
2812     TCGV_UNUSED_I64(o->in1);
2813     TCGV_UNUSED_I64(o->in2);
2814     o->g_in1 = o->g_in2 = false;
2815     return NO_EXIT;
/* MVC: storage-to-storage move of l1+1 bytes; done in a helper. */
2818 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2820     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2821     potential_page_fault(s);
2822     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2823     tcg_temp_free_i32(l);
2824     return NO_EXIT;
/* MVCL: move long; helper updates the register pairs and sets CC. */
2827 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2829     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2830     TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2831     potential_page_fault(s);
2832     gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2833     tcg_temp_free_i32(r1);
2834     tcg_temp_free_i32(r2);
2835     set_cc_static(s);
2836     return NO_EXIT;
/* MVCLE: move long extended; padding/length come via in2 and r3. */
2839 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2841     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2842     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2843     potential_page_fault(s);
2844     gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2845     tcg_temp_free_i32(r1);
2846     tcg_temp_free_i32(r3);
2847     set_cc_static(s);
2848     return NO_EXIT;
2851 #ifndef CONFIG_USER_ONLY
/* MVCP / MVCS: privileged cross-address-space moves (to primary / to
   secondary); note l1 here holds a register number, whose value regs[r1]
   carries the length+key. */
2852 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2854     int r1 = get_field(s->fields, l1);
2855     check_privileged(s);
2856     potential_page_fault(s);
2857     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2858     set_cc_static(s);
2859     return NO_EXIT;
2862 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2864     int r1 = get_field(s->fields, l1);
2865     check_privileged(s);
2866     potential_page_fault(s);
2867     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2868     set_cc_static(s);
2869     return NO_EXIT;
2871 #endif
/* MVPG: move page; regs[0] carries the function/key bits. */
2873 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2875     potential_page_fault(s);
2876     gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2877     set_cc_static(s);
2878     return NO_EXIT;
/* MVST: move string terminated by the byte in regs[0]; helper returns the
   updated r1 address in in1 and r2 address via the low128 mechanism. */
2881 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2883     potential_page_fault(s);
2884     gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2885     set_cc_static(s);
2886     return_low128(o->in2);
2887     return NO_EXIT;
/* Integer multiply, low 64 bits only. */
2890 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2892     tcg_gen_mul_i64(o->out, o->in1, o->in2);
2893     return NO_EXIT;
/* 64x64 -> 128 unsigned multiply: high half to out, low half to out2. */
2896 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2898     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2899     return NO_EXIT;
/* BFP multiplies: short (meeb), short->long (mdeb), long (mdb),
   extended (mxb), long->extended (mxdb); all via helpers so FP status
   and exceptions are handled in one place. */
2902 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2904     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2905     return NO_EXIT;
2908 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2910     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2911     return NO_EXIT;
2914 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2916     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2917     return NO_EXIT;
2920 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2922     gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2923     return_low128(o->out2);
2924     return NO_EXIT;
2927 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2929     gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2930     return_low128(o->out2);
2931     return NO_EXIT;
/* Fused multiply-add/subtract: maeb/mseb take a short (32-bit) r3
   operand loaded into a temp; madb/msdb use the long fregs[r3] directly. */
2934 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2936     TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2937     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2938     tcg_temp_free_i64(r3);
2939     return NO_EXIT;
2942 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2944     int r3 = get_field(s->fields, r3);
2945     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2946     return NO_EXIT;
2949 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2951     TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2952     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2953     tcg_temp_free_i64(r3);
2954     return NO_EXIT;
2957 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2959     int r3 = get_field(s->fields, r3);
2960     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2961     return NO_EXIT;
/* Load negative (LNR family): out = -|in2|, branch-free via movcond:
   if in2 >= 0 pick the negation, else keep in2. */
2964 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2966     TCGv_i64 z, n;
2967     z = tcg_const_i64(0);
2968     n = tcg_temp_new_i64();
2969     tcg_gen_neg_i64(n, o->in2);
2970     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2971     tcg_temp_free_i64(n);
2972     tcg_temp_free_i64(z);
2973     return NO_EXIT;
/* FP load negative: just force the IEEE sign bit on (32/64/128-bit). */
2976 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2978     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2979     return NO_EXIT;
2982 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2984     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2985     return NO_EXIT;
2988 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2990     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2991     tcg_gen_mov_i64(o->out2, o->in2);
2992     return NO_EXIT;
/* NC: storage-to-storage AND of l1+1 bytes; helper computes CC. */
2995 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2997     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2998     potential_page_fault(s);
2999     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3000     tcg_temp_free_i32(l);
3001     set_cc_static(s);
3002     return NO_EXIT;
/* Two's-complement negate. */
3005 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3007     tcg_gen_neg_i64(o->out, o->in2);
3008     return NO_EXIT;
/* FP load complement: toggle the IEEE sign bit (32/64/128-bit). */
3011 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3013     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3014     return NO_EXIT;
3017 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3019     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3020     return NO_EXIT;
3023 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3025     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3026     tcg_gen_mov_i64(o->out2, o->in2);
3027     return NO_EXIT;
/* OC: storage-to-storage OR of l1+1 bytes; helper computes CC. */
3030 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3032     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3033     potential_page_fault(s);
3034     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3035     tcg_temp_free_i32(l);
3036     set_cc_static(s);
3037     return NO_EXIT;
/* Register OR. */
3040 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3042     tcg_gen_or_i64(o->out, o->in1, o->in2);
3043     return NO_EXIT;
/* OR-immediate into a sub-field of the register: insn->data encodes the
   field as (size << 8) | shift.  CC reflects only the touched bits. */
3046 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3048     int shift = s->insn->data & 0xff;
3049     int size = s->insn->data >> 8;
3050     uint64_t mask = ((1ull << size) - 1) << shift;
3052     assert(!o->g_in2);
3053     tcg_gen_shli_i64(o->in2, o->in2, shift);
3054     tcg_gen_or_i64(o->out, o->in1, o->in2);
3056     /* Produce the CC from only the bits manipulated. */
3057     tcg_gen_andi_i64(cc_dst, o->out, mask);
3058     set_cc_nz_u64(s, cc_dst);
3059     return NO_EXIT;
/* POPCNT: per-byte population count, done in a helper. */
3062 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3064     gen_helper_popcnt(o->out, o->in2);
3065     return NO_EXIT;
3068 #ifndef CONFIG_USER_ONLY
/* PTLB (Purge TLB): privileged; helper flushes the translation cache. */
3069 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3071     check_privileged(s);
3072     gen_helper_ptlb(cpu_env);
3073     return NO_EXIT;
3075 #endif
/* RISBG/RISBHG/RISBLG: rotate in2 left by i5, then insert the bit range
   i3..i4 (wrap-around allowed) into out, optionally zeroing the rest
   (i4 bit 0x80).  The high/low variants restrict the operation to one
   32-bit half via PMASK.  Fast path uses a single deposit when the
   selected bits are contiguous. */
3077 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3079     int i3 = get_field(s->fields, i3);
3080     int i4 = get_field(s->fields, i4);
3081     int i5 = get_field(s->fields, i5);
3082     int do_zero = i4 & 0x80;
3083     uint64_t mask, imask, pmask;
3084     int pos, len, rot;
3086     /* Adjust the arguments for the specific insn. */
3087     switch (s->fields->op2) {
3088     case 0x55: /* risbg */
3089         i3 &= 63;
3090         i4 &= 63;
3091         pmask = ~0;
3092         break;
3093     case 0x5d: /* risbhg */
3094         i3 &= 31;
3095         i4 &= 31;
3096         pmask = 0xffffffff00000000ull;
3097         break;
3098     case 0x51: /* risblg */
3099         i3 &= 31;
3100         i4 &= 31;
3101         pmask = 0x00000000ffffffffull;
3102         break;
3103     default:
3104         abort();
3107     /* MASK is the set of bits to be inserted from R2.
3108        Take care for I3/I4 wraparound. */
3109     mask = pmask >> i3;
3110     if (i3 <= i4) {
3111         mask ^= pmask >> i4 >> 1;
3112     } else {
3113         mask |= ~(pmask >> i4 >> 1);
3115     mask &= pmask;
3117     /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3118        insns, we need to keep the other half of the register. */
3119     imask = ~mask | ~pmask;
3120     if (do_zero) {
3121         if (s->fields->op2 == 0x55) {
3122             imask = 0;
3123         } else {
3124             imask = ~pmask;
3128     /* In some cases we can implement this with deposit, which can be more
3129        efficient on some hosts. */
3130     if (~mask == imask && i3 <= i4) {
         /* For the high-word insn, rebase bit numbers into the full register. */
3131         if (s->fields->op2 == 0x5d) {
3132             i3 += 32, i4 += 32;
3134         /* Note that we rotate the bits to be inserted to the lsb, not to
3135            the position as described in the PoO. */
3136         len = i4 - i3 + 1;
3137         pos = 63 - i4;
3138         rot = (i5 - pos) & 63;
3139     } else {
3140         pos = len = -1;
3141         rot = i5 & 63;
3144     /* Rotate the input as necessary. */
3145     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3147     /* Insert the selected bits into the output. */
3148     if (pos >= 0) {
3149         tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3150     } else if (imask == 0) {
3151         tcg_gen_andi_i64(o->out, o->in2, mask);
3152     } else {
3153         tcg_gen_andi_i64(o->in2, o->in2, mask);
3154         tcg_gen_andi_i64(o->out, o->out, imask);
3155         tcg_gen_or_i64(o->out, o->out, o->in2);
3157     return NO_EXIT;
/* RNSBG/ROSBG/RXSBG: rotate in2 by i5, then AND/OR/XOR the selected bit
   range (i3..i4, wrap-around allowed) into out.  i3 bit 0x80 makes this
   test-only: the result is computed into a scratch and discarded, but the
   CC is still set from the masked result. */
3160 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3162     int i3 = get_field(s->fields, i3);
3163     int i4 = get_field(s->fields, i4);
3164     int i5 = get_field(s->fields, i5);
3165     uint64_t mask;
3167     /* If this is a test-only form, arrange to discard the result. */
3168     if (i3 & 0x80) {
3169         o->out = tcg_temp_new_i64();
3170         o->g_out = false;
3173     i3 &= 63;
3174     i4 &= 63;
3175     i5 &= 63;
3177     /* MASK is the set of bits to be operated on from R2.
3178        Take care for I3/I4 wraparound. */
3179     mask = ~0ull >> i3;
3180     if (i3 <= i4) {
3181         mask ^= ~0ull >> i4 >> 1;
3182     } else {
3183         mask |= ~(~0ull >> i4 >> 1);
3186     /* Rotate the input as necessary. */
3187     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3189     /* Operate. */
3190     switch (s->fields->op2) {
3191     case 0x55: /* AND */
         /* Bits outside the mask are forced to 1 so AND leaves them alone. */
3192         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3193         tcg_gen_and_i64(o->out, o->out, o->in2);
3194         break;
3195     case 0x56: /* OR */
3196         tcg_gen_andi_i64(o->in2, o->in2, mask);
3197         tcg_gen_or_i64(o->out, o->out, o->in2);
3198         break;
3199     case 0x57: /* XOR */
3200         tcg_gen_andi_i64(o->in2, o->in2, mask);
3201         tcg_gen_xor_i64(o->out, o->out, o->in2);
3202         break;
3203     default:
3204         abort();
3207     /* Set the CC. */
3208     tcg_gen_andi_i64(cc_dst, o->out, mask);
3209     set_cc_nz_u64(s, cc_dst);
3210     return NO_EXIT;
/* Byte-reversal (load reversed) at 16/32/64-bit widths. */
3213 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3215     tcg_gen_bswap16_i64(o->out, o->in2);
3216     return NO_EXIT;
3219 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3221     tcg_gen_bswap32_i64(o->out, o->in2);
3222     return NO_EXIT;
3225 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3227     tcg_gen_bswap64_i64(o->out, o->in2);
3228     return NO_EXIT;
/* RLL: 32-bit rotate left.  Done in i32 temps and zero-extended back,
   since TCG's 32-bit rotate needs 32-bit operands. */
3231 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3233     TCGv_i32 t1 = tcg_temp_new_i32();
3234     TCGv_i32 t2 = tcg_temp_new_i32();
3235     TCGv_i32 to = tcg_temp_new_i32();
3236     tcg_gen_extrl_i64_i32(t1, o->in1);
3237     tcg_gen_extrl_i64_i32(t2, o->in2);
3238     tcg_gen_rotl_i32(to, t1, t2);
3239     tcg_gen_extu_i32_i64(o->out, to);
3240     tcg_temp_free_i32(t1);
3241     tcg_temp_free_i32(t2);
3242     tcg_temp_free_i32(to);
3243     return NO_EXIT;
/* RLLG: 64-bit rotate left. */
3246 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3248     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3249     return NO_EXIT;
3252 #ifndef CONFIG_USER_ONLY
/* RRBE (Reset Reference Bit Extended): privileged; helper sets CC. */
3253 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3255     check_privileged(s);
3256     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3257     set_cc_static(s);
3258     return NO_EXIT;
/* SACF (Set Address Space Control Fast): privileged; the helper changes
   the addressing mode, so the TB must end. */
3261 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3263     check_privileged(s);
3264     gen_helper_sacf(cpu_env, o->in2);
3265     /* Addressing mode has changed, so end the block. */
3266     return EXIT_PC_STALE;
3268 #endif
/* SAM24/SAM31/SAM64: set addressing mode.  insn->data selects the mode
   (0/1/3); the corresponding address mask validates the current PC and
   truncates next_pc, then the mode bits are deposited into the PSW mask. */
3270 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3272     int sam = s->insn->data;
3273     TCGv_i64 tsam;
3274     uint64_t mask;
3276     switch (sam) {
3277     case 0:
3278         mask = 0xffffff;
3279         break;
3280     case 1:
3281         mask = 0x7fffffff;
3282         break;
3283     default:
3284         mask = -1;
3285         break;
3288     /* Bizarre but true, we check the address of the current insn for the
3289        specification exception, not the next to be executed. Thus the PoO
3290        documents that Bad Things Happen two bytes before the end. */
3291     if (s->pc & ~mask) {
3292         gen_program_exception(s, PGM_SPECIFICATION);
3293         return EXIT_NORETURN;
3295     s->next_pc &= mask;
3297     tsam = tcg_const_i64(sam);
     /* The two addressing-mode bits live at positions 31..32 of the PSW mask. */
3298     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3299     tcg_temp_free_i64(tsam);
3301     /* Always exit the TB, since we (may have) changed execution mode. */
3302     return EXIT_PC_STALE;
/* SAR: set access register r1 from the low 32 bits of in2. */
3305 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3307     int r1 = get_field(s->fields, r1);
3308     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3309     return NO_EXIT;
/* BFP subtract, short/long/extended; helpers handle FP status. */
3312 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3314     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3315     return NO_EXIT;
3318 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3320     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3321     return NO_EXIT;
3324 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3326     gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3327     return_low128(o->out2);
3328     return NO_EXIT;
/* BFP square root, short/long/extended. */
3331 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3333     gen_helper_sqeb(o->out, cpu_env, o->in2);
3334     return NO_EXIT;
3337 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3339     gen_helper_sqdb(o->out, cpu_env, o->in2);
3340     return NO_EXIT;
3343 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3345     gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3346     return_low128(o->out2);
3347     return NO_EXIT;
3350 #ifndef CONFIG_USER_ONLY
/* SERVC: service-call (SCLP) instruction; privileged, helper sets CC. */
3351 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3353     check_privileged(s);
3354     potential_page_fault(s);
3355     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3356     set_cc_static(s);
3357     return NO_EXIT;
/* SIGP: signal processor; privileged, helper sets CC. */
3360 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3362     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3363     check_privileged(s);
3364     potential_page_fault(s);
3365     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3366     tcg_temp_free_i32(r1);
3367     return NO_EXIT;
3369 #endif
/* STOC/STOCG (store on condition): branch around the store when the
   condition from mask m3 is NOT fulfilled.  insn->data selects 64- vs
   32-bit store width. */
3371 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3373     DisasCompare c;
3374     TCGv_i64 a;
3375     TCGLabel *lab;
3376     int r1;
3378     disas_jcc(s, &c, get_field(s->fields, m3));
3380     /* We want to store when the condition is fulfilled, so branch
3381        out when it's not */
3382     c.cond = tcg_invert_cond(c.cond);
3384     lab = gen_new_label();
3385     if (c.is_64) {
3386         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3387     } else {
3388         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3390     free_compare(&c);
3392     r1 = get_field(s->fields, r1);
3393     a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3394     if (s->insn->data) {
3395         tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3396     } else {
3397         tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3399     tcg_temp_free_i64(a);
3401     gen_set_label(lab);
3402     return NO_EXIT;
/* SLA/SLAG: arithmetic shift left.  insn->data gives the sign-bit
   position (31 or 63), which also selects the CC computation.  The sign
   bit itself is preserved from the source. */
3405 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3407     uint64_t sign = 1ull << s->insn->data;
3408     enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3409     gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3410     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3411     /* The arithmetic left shift is curious in that it does not affect
3412        the sign bit. Copy that over from the source unchanged. */
3413     tcg_gen_andi_i64(o->out, o->out, ~sign);
3414     tcg_gen_andi_i64(o->in1, o->in1, sign);
3415     tcg_gen_or_i64(o->out, o->out, o->in1);
3416     return NO_EXIT;
/* Plain logical shifts / arithmetic right shift. */
3419 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3421     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3422     return NO_EXIT;
3425 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3427     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3428     return NO_EXIT;
3431 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3433     tcg_gen_shr_i64(o->out, o->in1, o->in2);
3434     return NO_EXIT;
/* SFPC: set floating-point control register from in2. */
3437 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3439     gen_helper_sfpc(cpu_env, o->in2);
3440     return NO_EXIT;
/* SFASR: set FPC and signal (may raise simulated IEEE exceptions). */
3443 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3445     gen_helper_sfas(cpu_env, o->in2);
3446     return NO_EXIT;
/* SRNM/SRNMB/SRNMT: set the BFP/DFP rounding-mode field of the FPC.
   op2 selects which field (position/length) is written; the new value
   comes from the b2/d2 address computation, masked to the field width. */
3449 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3451     int b2 = get_field(s->fields, b2);
3452     int d2 = get_field(s->fields, d2);
3453     TCGv_i64 t1 = tcg_temp_new_i64();
3454     TCGv_i64 t2 = tcg_temp_new_i64();
3455     int mask, pos, len;
3457     switch (s->fields->op2) {
3458     case 0x99: /* SRNM */
3459         pos = 0, len = 2;
3460         break;
3461     case 0xb8: /* SRNMB */
3462         pos = 0, len = 3;
3463         break;
3464     case 0xb9: /* SRNMT */
3465         pos = 4, len = 3;
3466         break;
3467     default:
3468         tcg_abort();
3470     mask = (1 << len) - 1;
3472     /* Insert the value into the appropriate field of the FPC. */
3473     if (b2 == 0) {
3474         tcg_gen_movi_i64(t1, d2 & mask);
3475     } else {
3476         tcg_gen_addi_i64(t1, regs[b2], d2);
3477         tcg_gen_andi_i64(t1, t1, mask);
3479     tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3480     tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3481     tcg_temp_free_i64(t1);
3483     /* Then install the new FPC to set the rounding mode in fpu_status. */
3484     gen_helper_sfpc(cpu_env, t2);
3485     tcg_temp_free_i64(t2);
3486     return NO_EXIT;
3489 #ifndef CONFIG_USER_ONLY
/* SPKA: set PSW key from bits 4..7 of the second-operand address. */
3490 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3492     check_privileged(s);
3493     tcg_gen_shri_i64(o->in2, o->in2, 4);
3494     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3495     return NO_EXIT;
/* SSKE (Set Storage Key Extended): privileged; done in a helper. */
3498 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3500     check_privileged(s);
3501     gen_helper_sske(cpu_env, o->in1, o->in2);
3502     return NO_EXIT;
/* SSM: set the system-mask byte (PSW bits 56..63) from in2. */
3505 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3507     check_privileged(s);
3508     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3509     return NO_EXIT;
/* STAP: store CPU address. */
3512 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3514     check_privileged(s);
3515     /* ??? Surely cpu address != cpu number. In any case the previous
3516        version of this stored more than the required half-word, so it
3517        is unlikely this has ever been tested. */
3518     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3519     return NO_EXIT;
/* STCK: store the TOD clock value; CC 0 = clock in set state. */
3522 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3524     gen_helper_stck(o->out, cpu_env);
3525     /* ??? We don't implement clock states. */
3526     gen_op_movi_cc(s, 0);
3527     return NO_EXIT;
/* STCKE: store the clock as a 16-byte extended value.  The 64-bit clock
   is split across two doublewords (shift left 56 / right 8) to place it
   as a zero-extended 104-bit quantity. */
3530 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3532     TCGv_i64 c1 = tcg_temp_new_i64();
3533     TCGv_i64 c2 = tcg_temp_new_i64();
3534     gen_helper_stck(c1, cpu_env);
3535     /* Shift the 64-bit value into its place as a zero-extended
3536        104-bit value. Note that "bit positions 64-103 are always
3537        non-zero so that they compare differently to STCK"; we set
3538        the least significant bit to 1. */
3539     tcg_gen_shli_i64(c2, c1, 56);
3540     tcg_gen_shri_i64(c1, c1, 8);
3541     tcg_gen_ori_i64(c2, c2, 0x10000);
3542     tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3543     tcg_gen_addi_i64(o->in2, o->in2, 8);
3544     tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3545     tcg_temp_free_i64(c1);
3546     tcg_temp_free_i64(c2);
3547     /* ??? We don't implement clock states. */
3548     gen_op_movi_cc(s, 0);
3549     return NO_EXIT;
/* SCKC / STCKC: set / store the clock comparator. */
3552 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3554     check_privileged(s);
3555     gen_helper_sckc(cpu_env, o->in2);
3556     return NO_EXIT;
3559 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3561     check_privileged(s);
3562     gen_helper_stckc(o->out, cpu_env);
3563     return NO_EXIT;
/* STCTG / STCTL: store control registers r1..r3 (64-/32-bit). */
3566 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3568     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3569     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3570     check_privileged(s);
3571     potential_page_fault(s);
3572     gen_helper_stctg(cpu_env, r1, o->in2, r3);
3573     tcg_temp_free_i32(r1);
3574     tcg_temp_free_i32(r3);
3575     return NO_EXIT;
3578 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3580     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3581     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3582     check_privileged(s);
3583     potential_page_fault(s);
3584     gen_helper_stctl(cpu_env, r1, o->in2, r3);
3585     tcg_temp_free_i32(r1);
3586     tcg_temp_free_i32(r3);
3587     return NO_EXIT;
/* STIDP: store CPU id — cpu_num in the low word, machine_type deposited
   in the high word. */
3590 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3592     TCGv_i64 t1 = tcg_temp_new_i64();
3594     check_privileged(s);
3595     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3596     tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3597     tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3598     tcg_temp_free_i64(t1);
3600     return NO_EXIT;
/* SPT: set the CPU timer. */
3603 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3605     check_privileged(s);
3606     gen_helper_spt(cpu_env, o->in2);
3607     return NO_EXIT;
/* STFL: store facility list — a fixed word written to absolute 200. */
3610 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3612     TCGv_i64 f, a;
3613     /* We really ought to have more complete indication of facilities
3614        that we implement. Address this when STFLE is implemented. */
3615     check_privileged(s);
3616     f = tcg_const_i64(0xc0000000);
3617     a = tcg_const_i64(200);
3618     tcg_gen_qemu_st32(f, a, get_mem_index(s));
3619     tcg_temp_free_i64(f);
3620     tcg_temp_free_i64(a);
3621     return NO_EXIT;
/* STPT: store the CPU timer. */
3624 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3626     check_privileged(s);
3627     gen_helper_stpt(o->out, cpu_env);
3628     return NO_EXIT;
/* STSI: store system information; function code in regs[0]/regs[1]. */
3631 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3633     check_privileged(s);
3634     potential_page_fault(s);
3635     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3636     set_cc_static(s);
3637     return NO_EXIT;
/* SPX: set prefix register. */
3640 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3642     check_privileged(s);
3643     gen_helper_spx(cpu_env, o->in2);
3644     return NO_EXIT;
/* Channel-subsystem I/O instructions: all privileged, all take the
   subchannel id implicitly in regs[1] (plus an operand address where
   applicable), all set CC from the helper. */
3647 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3649     check_privileged(s);
3650     potential_page_fault(s);
3651     gen_helper_xsch(cpu_env, regs[1]);
3652     set_cc_static(s);
3653     return NO_EXIT;
3656 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3658     check_privileged(s);
3659     potential_page_fault(s);
3660     gen_helper_csch(cpu_env, regs[1]);
3661     set_cc_static(s);
3662     return NO_EXIT;
3665 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3667     check_privileged(s);
3668     potential_page_fault(s);
3669     gen_helper_hsch(cpu_env, regs[1]);
3670     set_cc_static(s);
3671     return NO_EXIT;
3674 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3676     check_privileged(s);
3677     potential_page_fault(s);
3678     gen_helper_msch(cpu_env, regs[1], o->in2);
3679     set_cc_static(s);
3680     return NO_EXIT;
3683 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3685     check_privileged(s);
3686     potential_page_fault(s);
3687     gen_helper_rchp(cpu_env, regs[1]);
3688     set_cc_static(s);
3689     return NO_EXIT;
3692 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3694     check_privileged(s);
3695     potential_page_fault(s);
3696     gen_helper_rsch(cpu_env, regs[1]);
3697     set_cc_static(s);
3698     return NO_EXIT;
3701 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3703     check_privileged(s);
3704     potential_page_fault(s);
3705     gen_helper_ssch(cpu_env, regs[1], o->in2);
3706     set_cc_static(s);
3707     return NO_EXIT;
3710 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3712     check_privileged(s);
3713     potential_page_fault(s);
3714     gen_helper_stsch(cpu_env, regs[1], o->in2);
3715     set_cc_static(s);
3716     return NO_EXIT;
3719 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3721     check_privileged(s);
3722     potential_page_fault(s);
3723     gen_helper_tsch(cpu_env, regs[1], o->in2);
3724     set_cc_static(s);
3725     return NO_EXIT;
3728 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3730     check_privileged(s);
3731     potential_page_fault(s);
3732     gen_helper_chsc(cpu_env, o->in2);
3733     set_cc_static(s);
3734     return NO_EXIT;
3737 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3739 check_privileged(s);
3740 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3741 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3742 return NO_EXIT;
3745 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3747 uint64_t i2 = get_field(s->fields, i2);
3748 TCGv_i64 t;
3750 check_privileged(s);
3752 /* It is important to do what the instruction name says: STORE THEN.
3753 If we let the output hook perform the store then if we fault and
3754 restart, we'll have the wrong SYSTEM MASK in place. */
3755 t = tcg_temp_new_i64();
3756 tcg_gen_shri_i64(t, psw_mask, 56);
3757 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3758 tcg_temp_free_i64(t);
3760 if (s->fields->op == 0xac) {
3761 tcg_gen_andi_i64(psw_mask, psw_mask,
3762 (i2 << 56) | 0x00ffffffffffffffull);
3763 } else {
3764 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3766 return NO_EXIT;
3769 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3771 check_privileged(s);
3772 potential_page_fault(s);
3773 gen_helper_stura(cpu_env, o->in2, o->in1);
3774 return NO_EXIT;
3777 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3779 check_privileged(s);
3780 potential_page_fault(s);
3781 gen_helper_sturg(cpu_env, o->in2, o->in1);
3782 return NO_EXIT;
3784 #endif
3786 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3788 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3789 return NO_EXIT;
3792 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3794 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3795 return NO_EXIT;
3798 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3800 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3801 return NO_EXIT;
3804 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3806 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3807 return NO_EXIT;
3810 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3812 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3813 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3814 potential_page_fault(s);
3815 gen_helper_stam(cpu_env, r1, o->in2, r3);
3816 tcg_temp_free_i32(r1);
3817 tcg_temp_free_i32(r3);
3818 return NO_EXIT;
3821 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3823 int m3 = get_field(s->fields, m3);
3824 int pos, base = s->insn->data;
3825 TCGv_i64 tmp = tcg_temp_new_i64();
3827 pos = base + ctz32(m3) * 8;
3828 switch (m3) {
3829 case 0xf:
3830 /* Effectively a 32-bit store. */
3831 tcg_gen_shri_i64(tmp, o->in1, pos);
3832 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3833 break;
3835 case 0xc:
3836 case 0x6:
3837 case 0x3:
3838 /* Effectively a 16-bit store. */
3839 tcg_gen_shri_i64(tmp, o->in1, pos);
3840 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3841 break;
3843 case 0x8:
3844 case 0x4:
3845 case 0x2:
3846 case 0x1:
3847 /* Effectively an 8-bit store. */
3848 tcg_gen_shri_i64(tmp, o->in1, pos);
3849 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3850 break;
3852 default:
3853 /* This is going to be a sequence of shifts and stores. */
3854 pos = base + 32 - 8;
3855 while (m3) {
3856 if (m3 & 0x8) {
3857 tcg_gen_shri_i64(tmp, o->in1, pos);
3858 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3859 tcg_gen_addi_i64(o->in2, o->in2, 1);
3861 m3 = (m3 << 1) & 0xf;
3862 pos -= 8;
3864 break;
3866 tcg_temp_free_i64(tmp);
3867 return NO_EXIT;
3870 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3872 int r1 = get_field(s->fields, r1);
3873 int r3 = get_field(s->fields, r3);
3874 int size = s->insn->data;
3875 TCGv_i64 tsize = tcg_const_i64(size);
3877 while (1) {
3878 if (size == 8) {
3879 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3880 } else {
3881 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3883 if (r1 == r3) {
3884 break;
3886 tcg_gen_add_i64(o->in2, o->in2, tsize);
3887 r1 = (r1 + 1) & 15;
3890 tcg_temp_free_i64(tsize);
3891 return NO_EXIT;
3894 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3896 int r1 = get_field(s->fields, r1);
3897 int r3 = get_field(s->fields, r3);
3898 TCGv_i64 t = tcg_temp_new_i64();
3899 TCGv_i64 t4 = tcg_const_i64(4);
3900 TCGv_i64 t32 = tcg_const_i64(32);
3902 while (1) {
3903 tcg_gen_shl_i64(t, regs[r1], t32);
3904 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3905 if (r1 == r3) {
3906 break;
3908 tcg_gen_add_i64(o->in2, o->in2, t4);
3909 r1 = (r1 + 1) & 15;
3912 tcg_temp_free_i64(t);
3913 tcg_temp_free_i64(t4);
3914 tcg_temp_free_i64(t32);
3915 return NO_EXIT;
3918 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3920 potential_page_fault(s);
3921 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3922 set_cc_static(s);
3923 return_low128(o->in2);
3924 return NO_EXIT;
3927 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3929 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3930 return NO_EXIT;
3933 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3935 DisasCompare cmp;
3936 TCGv_i64 borrow;
3938 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3940 /* The !borrow flag is the msb of CC. Since we want the inverse of
3941 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3942 disas_jcc(s, &cmp, 8 | 4);
3943 borrow = tcg_temp_new_i64();
3944 if (cmp.is_64) {
3945 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3946 } else {
3947 TCGv_i32 t = tcg_temp_new_i32();
3948 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3949 tcg_gen_extu_i32_i64(borrow, t);
3950 tcg_temp_free_i32(t);
3952 free_compare(&cmp);
3954 tcg_gen_sub_i64(o->out, o->out, borrow);
3955 tcg_temp_free_i64(borrow);
3956 return NO_EXIT;
3959 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3961 TCGv_i32 t;
3963 update_psw_addr(s);
3964 update_cc_op(s);
3966 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3967 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3968 tcg_temp_free_i32(t);
3970 t = tcg_const_i32(s->next_pc - s->pc);
3971 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3972 tcg_temp_free_i32(t);
3974 gen_exception(EXCP_SVC);
3975 return EXIT_NORETURN;
3978 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3980 gen_helper_tceb(cc_op, o->in1, o->in2);
3981 set_cc_static(s);
3982 return NO_EXIT;
3985 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3987 gen_helper_tcdb(cc_op, o->in1, o->in2);
3988 set_cc_static(s);
3989 return NO_EXIT;
3992 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3994 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3995 set_cc_static(s);
3996 return NO_EXIT;
3999 #ifndef CONFIG_USER_ONLY
4000 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4002 potential_page_fault(s);
4003 gen_helper_tprot(cc_op, o->addr1, o->in2);
4004 set_cc_static(s);
4005 return NO_EXIT;
4007 #endif
4009 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4011 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4012 potential_page_fault(s);
4013 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4014 tcg_temp_free_i32(l);
4015 set_cc_static(s);
4016 return NO_EXIT;
4019 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4021 potential_page_fault(s);
4022 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4023 return_low128(o->out2);
4024 set_cc_static(s);
4025 return NO_EXIT;
4028 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4030 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4031 potential_page_fault(s);
4032 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4033 tcg_temp_free_i32(l);
4034 set_cc_static(s);
4035 return NO_EXIT;
4038 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4040 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4041 potential_page_fault(s);
4042 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4043 tcg_temp_free_i32(l);
4044 return NO_EXIT;
4047 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4049 int d1 = get_field(s->fields, d1);
4050 int d2 = get_field(s->fields, d2);
4051 int b1 = get_field(s->fields, b1);
4052 int b2 = get_field(s->fields, b2);
4053 int l = get_field(s->fields, l1);
4054 TCGv_i32 t32;
4056 o->addr1 = get_address(s, 0, b1, d1);
4058 /* If the addresses are identical, this is a store/memset of zero. */
4059 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4060 o->in2 = tcg_const_i64(0);
4062 l++;
4063 while (l >= 8) {
4064 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4065 l -= 8;
4066 if (l > 0) {
4067 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4070 if (l >= 4) {
4071 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4072 l -= 4;
4073 if (l > 0) {
4074 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4077 if (l >= 2) {
4078 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4079 l -= 2;
4080 if (l > 0) {
4081 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4084 if (l) {
4085 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4087 gen_op_movi_cc(s, 0);
4088 return NO_EXIT;
4091 /* But in general we'll defer to a helper. */
4092 o->in2 = get_address(s, 0, b2, d2);
4093 t32 = tcg_const_i32(l);
4094 potential_page_fault(s);
4095 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4096 tcg_temp_free_i32(t32);
4097 set_cc_static(s);
4098 return NO_EXIT;
4101 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4103 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4104 return NO_EXIT;
4107 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4109 int shift = s->insn->data & 0xff;
4110 int size = s->insn->data >> 8;
4111 uint64_t mask = ((1ull << size) - 1) << shift;
4113 assert(!o->g_in2);
4114 tcg_gen_shli_i64(o->in2, o->in2, shift);
4115 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4117 /* Produce the CC from only the bits manipulated. */
4118 tcg_gen_andi_i64(cc_dst, o->out, mask);
4119 set_cc_nz_u64(s, cc_dst);
4120 return NO_EXIT;
4123 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4125 o->out = tcg_const_i64(0);
4126 return NO_EXIT;
4129 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4131 o->out = tcg_const_i64(0);
4132 o->out2 = o->out;
4133 o->g_out2 = true;
4134 return NO_EXIT;
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */
4142 static void cout_abs32(DisasContext *s, DisasOps *o)
4144 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4147 static void cout_abs64(DisasContext *s, DisasOps *o)
4149 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4152 static void cout_adds32(DisasContext *s, DisasOps *o)
4154 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4157 static void cout_adds64(DisasContext *s, DisasOps *o)
4159 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4162 static void cout_addu32(DisasContext *s, DisasOps *o)
4164 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4167 static void cout_addu64(DisasContext *s, DisasOps *o)
4169 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4172 static void cout_addc32(DisasContext *s, DisasOps *o)
4174 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4177 static void cout_addc64(DisasContext *s, DisasOps *o)
4179 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4182 static void cout_cmps32(DisasContext *s, DisasOps *o)
4184 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4187 static void cout_cmps64(DisasContext *s, DisasOps *o)
4189 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4192 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4194 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4197 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4199 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4202 static void cout_f32(DisasContext *s, DisasOps *o)
4204 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4207 static void cout_f64(DisasContext *s, DisasOps *o)
4209 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4212 static void cout_f128(DisasContext *s, DisasOps *o)
4214 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4217 static void cout_nabs32(DisasContext *s, DisasOps *o)
4219 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4222 static void cout_nabs64(DisasContext *s, DisasOps *o)
4224 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4227 static void cout_neg32(DisasContext *s, DisasOps *o)
4229 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4232 static void cout_neg64(DisasContext *s, DisasOps *o)
4234 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4237 static void cout_nz32(DisasContext *s, DisasOps *o)
4239 tcg_gen_ext32u_i64(cc_dst, o->out);
4240 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4243 static void cout_nz64(DisasContext *s, DisasOps *o)
4245 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4248 static void cout_s32(DisasContext *s, DisasOps *o)
4250 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4253 static void cout_s64(DisasContext *s, DisasOps *o)
4255 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4258 static void cout_subs32(DisasContext *s, DisasOps *o)
4260 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4263 static void cout_subs64(DisasContext *s, DisasOps *o)
4265 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4268 static void cout_subu32(DisasContext *s, DisasOps *o)
4270 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4273 static void cout_subu64(DisasContext *s, DisasOps *o)
4275 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4278 static void cout_subb32(DisasContext *s, DisasOps *o)
4280 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4283 static void cout_subb64(DisasContext *s, DisasOps *o)
4285 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4288 static void cout_tm32(DisasContext *s, DisasOps *o)
4290 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4293 static void cout_tm64(DisasContext *s, DisasOps *o)
4295 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
4304 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4306 o->out = tcg_temp_new_i64();
4308 #define SPEC_prep_new 0
4310 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4312 o->out = tcg_temp_new_i64();
4313 o->out2 = tcg_temp_new_i64();
4315 #define SPEC_prep_new_P 0
4317 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4319 o->out = regs[get_field(f, r1)];
4320 o->g_out = true;
4322 #define SPEC_prep_r1 0
4324 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4326 int r1 = get_field(f, r1);
4327 o->out = regs[r1];
4328 o->out2 = regs[r1 + 1];
4329 o->g_out = o->g_out2 = true;
4331 #define SPEC_prep_r1_P SPEC_r1_even
4333 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4335 o->out = fregs[get_field(f, r1)];
4336 o->g_out = true;
4338 #define SPEC_prep_f1 0
4340 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4342 int r1 = get_field(f, r1);
4343 o->out = fregs[r1];
4344 o->out2 = fregs[r1 + 2];
4345 o->g_out = o->g_out2 = true;
4347 #define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
4355 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4357 store_reg(get_field(f, r1), o->out);
4359 #define SPEC_wout_r1 0
4361 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4363 int r1 = get_field(f, r1);
4364 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4366 #define SPEC_wout_r1_8 0
4368 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4370 int r1 = get_field(f, r1);
4371 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4373 #define SPEC_wout_r1_16 0
4375 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4377 store_reg32_i64(get_field(f, r1), o->out);
4379 #define SPEC_wout_r1_32 0
4381 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4383 store_reg32h_i64(get_field(f, r1), o->out);
4385 #define SPEC_wout_r1_32h 0
4387 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4389 int r1 = get_field(f, r1);
4390 store_reg32_i64(r1, o->out);
4391 store_reg32_i64(r1 + 1, o->out2);
4393 #define SPEC_wout_r1_P32 SPEC_r1_even
4395 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4397 int r1 = get_field(f, r1);
4398 store_reg32_i64(r1 + 1, o->out);
4399 tcg_gen_shri_i64(o->out, o->out, 32);
4400 store_reg32_i64(r1, o->out);
4402 #define SPEC_wout_r1_D32 SPEC_r1_even
4404 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4406 store_freg32_i64(get_field(f, r1), o->out);
4408 #define SPEC_wout_e1 0
4410 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4412 store_freg(get_field(f, r1), o->out);
4414 #define SPEC_wout_f1 0
4416 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4418 int f1 = get_field(s->fields, r1);
4419 store_freg(f1, o->out);
4420 store_freg(f1 + 2, o->out2);
4422 #define SPEC_wout_x1 SPEC_r1_f128
4424 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4426 if (get_field(f, r1) != get_field(f, r2)) {
4427 store_reg32_i64(get_field(f, r1), o->out);
4430 #define SPEC_wout_cond_r1r2_32 0
4432 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4434 if (get_field(f, r1) != get_field(f, r2)) {
4435 store_freg32_i64(get_field(f, r1), o->out);
4438 #define SPEC_wout_cond_e1e2 0
4440 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4442 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4444 #define SPEC_wout_m1_8 0
4446 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4448 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4450 #define SPEC_wout_m1_16 0
4452 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4454 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4456 #define SPEC_wout_m1_32 0
4458 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4460 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4462 #define SPEC_wout_m1_64 0
4464 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4466 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4468 #define SPEC_wout_m2_32 0
4470 static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4472 /* XXX release reservation */
4473 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4474 store_reg32_i64(get_field(f, r1), o->in2);
4476 #define SPEC_wout_m2_32_r1_atomic 0
4478 static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4480 /* XXX release reservation */
4481 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4482 store_reg(get_field(f, r1), o->in2);
4484 #define SPEC_wout_m2_64_r1_atomic 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
4489 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4491 o->in1 = load_reg(get_field(f, r1));
4493 #define SPEC_in1_r1 0
4495 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4497 o->in1 = regs[get_field(f, r1)];
4498 o->g_in1 = true;
4500 #define SPEC_in1_r1_o 0
4502 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4504 o->in1 = tcg_temp_new_i64();
4505 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4507 #define SPEC_in1_r1_32s 0
4509 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4511 o->in1 = tcg_temp_new_i64();
4512 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4514 #define SPEC_in1_r1_32u 0
4516 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4518 o->in1 = tcg_temp_new_i64();
4519 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4521 #define SPEC_in1_r1_sr32 0
4523 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4525 o->in1 = load_reg(get_field(f, r1) + 1);
4527 #define SPEC_in1_r1p1 SPEC_r1_even
4529 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4531 o->in1 = tcg_temp_new_i64();
4532 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4534 #define SPEC_in1_r1p1_32s SPEC_r1_even
4536 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4538 o->in1 = tcg_temp_new_i64();
4539 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4541 #define SPEC_in1_r1p1_32u SPEC_r1_even
4543 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4545 int r1 = get_field(f, r1);
4546 o->in1 = tcg_temp_new_i64();
4547 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4549 #define SPEC_in1_r1_D32 SPEC_r1_even
4551 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4553 o->in1 = load_reg(get_field(f, r2));
4555 #define SPEC_in1_r2 0
4557 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4559 o->in1 = tcg_temp_new_i64();
4560 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4562 #define SPEC_in1_r2_sr32 0
4564 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4566 o->in1 = load_reg(get_field(f, r3));
4568 #define SPEC_in1_r3 0
4570 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4572 o->in1 = regs[get_field(f, r3)];
4573 o->g_in1 = true;
4575 #define SPEC_in1_r3_o 0
4577 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4579 o->in1 = tcg_temp_new_i64();
4580 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4582 #define SPEC_in1_r3_32s 0
4584 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4586 o->in1 = tcg_temp_new_i64();
4587 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4589 #define SPEC_in1_r3_32u 0
4591 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4593 int r3 = get_field(f, r3);
4594 o->in1 = tcg_temp_new_i64();
4595 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4597 #define SPEC_in1_r3_D32 SPEC_r3_even
4599 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4601 o->in1 = load_freg32_i64(get_field(f, r1));
4603 #define SPEC_in1_e1 0
4605 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4607 o->in1 = fregs[get_field(f, r1)];
4608 o->g_in1 = true;
4610 #define SPEC_in1_f1_o 0
4612 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4614 int r1 = get_field(f, r1);
4615 o->out = fregs[r1];
4616 o->out2 = fregs[r1 + 2];
4617 o->g_out = o->g_out2 = true;
4619 #define SPEC_in1_x1_o SPEC_r1_f128
4621 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4623 o->in1 = fregs[get_field(f, r3)];
4624 o->g_in1 = true;
4626 #define SPEC_in1_f3_o 0
4628 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4630 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4632 #define SPEC_in1_la1 0
4634 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4636 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4637 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4639 #define SPEC_in1_la2 0
4641 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4643 in1_la1(s, f, o);
4644 o->in1 = tcg_temp_new_i64();
4645 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4647 #define SPEC_in1_m1_8u 0
4649 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4651 in1_la1(s, f, o);
4652 o->in1 = tcg_temp_new_i64();
4653 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4655 #define SPEC_in1_m1_16s 0
4657 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4659 in1_la1(s, f, o);
4660 o->in1 = tcg_temp_new_i64();
4661 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4663 #define SPEC_in1_m1_16u 0
4665 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4667 in1_la1(s, f, o);
4668 o->in1 = tcg_temp_new_i64();
4669 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4671 #define SPEC_in1_m1_32s 0
4673 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4675 in1_la1(s, f, o);
4676 o->in1 = tcg_temp_new_i64();
4677 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4679 #define SPEC_in1_m1_32u 0
4681 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4683 in1_la1(s, f, o);
4684 o->in1 = tcg_temp_new_i64();
4685 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4687 #define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
4692 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4694 o->in2 = regs[get_field(f, r1)];
4695 o->g_in2 = true;
4697 #define SPEC_in2_r1_o 0
4699 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4701 o->in2 = tcg_temp_new_i64();
4702 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4704 #define SPEC_in2_r1_16u 0
4706 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4708 o->in2 = tcg_temp_new_i64();
4709 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4711 #define SPEC_in2_r1_32u 0
4713 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4715 int r1 = get_field(f, r1);
4716 o->in2 = tcg_temp_new_i64();
4717 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4719 #define SPEC_in2_r1_D32 SPEC_r1_even
4721 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4723 o->in2 = load_reg(get_field(f, r2));
4725 #define SPEC_in2_r2 0
4727 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4729 o->in2 = regs[get_field(f, r2)];
4730 o->g_in2 = true;
4732 #define SPEC_in2_r2_o 0
4734 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4736 int r2 = get_field(f, r2);
4737 if (r2 != 0) {
4738 o->in2 = load_reg(r2);
4741 #define SPEC_in2_r2_nz 0
4743 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4745 o->in2 = tcg_temp_new_i64();
4746 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4748 #define SPEC_in2_r2_8s 0
4750 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4752 o->in2 = tcg_temp_new_i64();
4753 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4755 #define SPEC_in2_r2_8u 0
4757 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4759 o->in2 = tcg_temp_new_i64();
4760 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4762 #define SPEC_in2_r2_16s 0
4764 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4766 o->in2 = tcg_temp_new_i64();
4767 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4769 #define SPEC_in2_r2_16u 0
4771 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4773 o->in2 = load_reg(get_field(f, r3));
4775 #define SPEC_in2_r3 0
4777 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4779 o->in2 = tcg_temp_new_i64();
4780 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4782 #define SPEC_in2_r3_sr32 0
4784 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4786 o->in2 = tcg_temp_new_i64();
4787 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4789 #define SPEC_in2_r2_32s 0
4791 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4793 o->in2 = tcg_temp_new_i64();
4794 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4796 #define SPEC_in2_r2_32u 0
4798 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4800 o->in2 = tcg_temp_new_i64();
4801 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4803 #define SPEC_in2_r2_sr32 0
4805 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4807 o->in2 = load_freg32_i64(get_field(f, r2));
4809 #define SPEC_in2_e2 0
4811 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4813 o->in2 = fregs[get_field(f, r2)];
4814 o->g_in2 = true;
4816 #define SPEC_in2_f2_o 0
4818 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4820 int r2 = get_field(f, r2);
4821 o->in1 = fregs[r2];
4822 o->in2 = fregs[r2 + 2];
4823 o->g_in1 = o->g_in2 = true;
4825 #define SPEC_in2_x2_o SPEC_r2_f128
4827 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4829 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4831 #define SPEC_in2_ra2 0
4833 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4835 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4836 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4838 #define SPEC_in2_a2 0
4840 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4842 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4844 #define SPEC_in2_ri2 0
4846 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4848 help_l2_shift(s, f, o, 31);
4850 #define SPEC_in2_sh32 0
4852 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4854 help_l2_shift(s, f, o, 63);
4856 #define SPEC_in2_sh64 0
4858 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4860 in2_a2(s, f, o);
4861 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4863 #define SPEC_in2_m2_8u 0
4865 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4867 in2_a2(s, f, o);
4868 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4870 #define SPEC_in2_m2_16s 0
4872 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4874 in2_a2(s, f, o);
4875 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4877 #define SPEC_in2_m2_16u 0
4879 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4881 in2_a2(s, f, o);
4882 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4884 #define SPEC_in2_m2_32s 0
4886 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4888 in2_a2(s, f, o);
4889 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4891 #define SPEC_in2_m2_32u 0
4893 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4895 in2_a2(s, f, o);
4896 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4898 #define SPEC_in2_m2_64 0
4900 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4902 in2_ri2(s, f, o);
4903 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4905 #define SPEC_in2_mri2_16u 0
4907 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4909 in2_ri2(s, f, o);
4910 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4912 #define SPEC_in2_mri2_32s 0
4914 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4916 in2_ri2(s, f, o);
4917 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4919 #define SPEC_in2_mri2_32u 0
4921 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4923 in2_ri2(s, f, o);
4924 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4926 #define SPEC_in2_mri2_64 0
4928 static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4930 /* XXX should reserve the address */
4931 in1_la2(s, f, o);
4932 o->in2 = tcg_temp_new_i64();
4933 tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4935 #define SPEC_in2_m2_32s_atomic 0
4937 static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4939 /* XXX should reserve the address */
4940 in1_la2(s, f, o);
4941 o->in2 = tcg_temp_new_i64();
4942 tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4944 #define SPEC_in2_m2_64_atomic 0
4946 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4948 o->in2 = tcg_const_i64(get_field(f, i2));
4950 #define SPEC_in2_i2 0
4952 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4954 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4956 #define SPEC_in2_i2_8u 0
4958 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4960 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4962 #define SPEC_in2_i2_16u 0
4964 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4966 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4968 #define SPEC_in2_i2_32u 0
4970 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4972 uint64_t i2 = (uint16_t)get_field(f, i2);
4973 o->in2 = tcg_const_i64(i2 << s->insn->data);
4975 #define SPEC_in2_i2_16u_shl 0
4977 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4979 uint64_t i2 = (uint32_t)get_field(f, i2);
4980 o->in2 = tcg_const_i64(i2 << s->insn->data);
4982 #define SPEC_in2_i2_32u_shl 0
4984 #ifndef CONFIG_USER_ONLY
4985 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
4987 o->in2 = tcg_const_i64(s->fields->raw_insn);
4989 #define SPEC_in2_insn 0
4990 #endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.
   The same insn-data.def is included three times below with different
   expansions of the C/D macros: once to build the enum of insn indexes,
   once to build the table of DisasInsn descriptors, and once to build
   the opcode-to-descriptor switch.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* Expansion 1: one enumerator per insn, naming its table slot.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Expansion 2: one DisasInsn initializer per insn.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Expansion 3: map a 16-bit opcode to its descriptor via a switch.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
5057 /* Extract a field from the insn. The INSN should be left-aligned in
5058 the uint64_t so that we can more easily utilize the big-bit-endian
5059 definitions we extract from the Principals of Operation. */
5061 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5063 uint32_t r, m;
5065 if (f->size == 0) {
5066 return;
5069 /* Zero extract the field from the insn. */
5070 r = (insn << f->beg) >> (64 - f->size);
5072 /* Sign-extend, or un-swap the field as necessary. */
5073 switch (f->type) {
5074 case 0: /* unsigned */
5075 break;
5076 case 1: /* signed */
5077 assert(f->size <= 32);
5078 m = 1u << (f->size - 1);
5079 r = (r ^ m) - m;
5080 break;
5081 case 2: /* dl+dh split, signed 20 bit. */
5082 r = ((int8_t)r << 12) | (r >> 8);
5083 break;
5084 default:
5085 abort();
5088 /* Validate that the "compressed" encoding we selected above is valid.
5089 I.e. we havn't make two different original fields overlap. */
5090 assert(((o->presentC >> f->indexC) & 1) == 0);
5091 o->presentC |= 1 << f->indexC;
5092 o->presentO |= 1 << f->indexO;
5094 o->c[f->indexC] = r;
5097 /* Lookup the insn at the current PC, extracting the operands into O and
5098 returning the info struct for the insn. Returns NULL for invalid insn. */
5100 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5101 DisasFields *f)
5103 uint64_t insn, pc = s->pc;
5104 int op, op2, ilen;
5105 const DisasInsn *info;
5107 insn = ld_code2(env, pc);
5108 op = (insn >> 8) & 0xff;
5109 ilen = get_ilen(op);
5110 s->next_pc = s->pc + ilen;
5112 switch (ilen) {
5113 case 2:
5114 insn = insn << 48;
5115 break;
5116 case 4:
5117 insn = ld_code4(env, pc) << 32;
5118 break;
5119 case 6:
5120 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5121 break;
5122 default:
5123 abort();
5126 /* We can't actually determine the insn format until we've looked up
5127 the full insn opcode. Which we can't do without locating the
5128 secondary opcode. Assume by default that OP2 is at bit 40; for
5129 those smaller insns that don't actually have a secondary opcode
5130 this will correctly result in OP2 = 0. */
5131 switch (op) {
5132 case 0x01: /* E */
5133 case 0x80: /* S */
5134 case 0x82: /* S */
5135 case 0x93: /* S */
5136 case 0xb2: /* S, RRF, RRE */
5137 case 0xb3: /* RRE, RRD, RRF */
5138 case 0xb9: /* RRE, RRF */
5139 case 0xe5: /* SSE, SIL */
5140 op2 = (insn << 8) >> 56;
5141 break;
5142 case 0xa5: /* RI */
5143 case 0xa7: /* RI */
5144 case 0xc0: /* RIL */
5145 case 0xc2: /* RIL */
5146 case 0xc4: /* RIL */
5147 case 0xc6: /* RIL */
5148 case 0xc8: /* SSF */
5149 case 0xcc: /* RIL */
5150 op2 = (insn << 12) >> 60;
5151 break;
5152 case 0xd0 ... 0xdf: /* SS */
5153 case 0xe1: /* SS */
5154 case 0xe2: /* SS */
5155 case 0xe8: /* SS */
5156 case 0xe9: /* SS */
5157 case 0xea: /* SS */
5158 case 0xee ... 0xf3: /* SS */
5159 case 0xf8 ... 0xfd: /* SS */
5160 op2 = 0;
5161 break;
5162 default:
5163 op2 = (insn << 40) >> 56;
5164 break;
5167 memset(f, 0, sizeof(*f));
5168 f->raw_insn = insn;
5169 f->op = op;
5170 f->op2 = op2;
5172 /* Lookup the instruction. */
5173 info = lookup_opc(op << 8 | op2);
5175 /* If we found it, extract the operands. */
5176 if (info != NULL) {
5177 DisasFormat fmt = info->fmt;
5178 int i;
5180 for (i = 0; i < NUM_C_FIELD; ++i) {
5181 extract_field(f, &format_info[fmt].op[i], insn);
5184 return info;
/* Translate the single instruction at s->pc: look it up, check its
   specification constraints, run its in1/in2/prep/op/wout/cout helper
   pipeline, and free any temporaries created along the way.  Returns the
   op helper's exit status, or NO_EXIT when translation should continue.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing enabled, record this instruction fetch.  */
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Even-register constraints: r1/r2/r3 must name the even half
           of a register pair.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit FP operands use the register pair (r, r+2), so r > 13
           would index past the last FP register.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction: each stage is optional and runs in
       fixed order -- load inputs, prepare outputs, perform the op,
       write back outputs, then compute the condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias global registers and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
5320 void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
5322 S390CPU *cpu = s390_env_get_cpu(env);
5323 CPUState *cs = CPU(cpu);
5324 DisasContext dc;
5325 target_ulong pc_start;
5326 uint64_t next_page_start;
5327 int num_insns, max_insns;
5328 ExitStatus status;
5329 bool do_debug;
5331 pc_start = tb->pc;
5333 /* 31-bit mode */
5334 if (!(tb->flags & FLAG_MASK_64)) {
5335 pc_start &= 0x7fffffff;
5338 dc.tb = tb;
5339 dc.pc = pc_start;
5340 dc.cc_op = CC_OP_DYNAMIC;
5341 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5343 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5345 num_insns = 0;
5346 max_insns = tb->cflags & CF_COUNT_MASK;
5347 if (max_insns == 0) {
5348 max_insns = CF_COUNT_MASK;
5350 if (max_insns > TCG_MAX_INSNS) {
5351 max_insns = TCG_MAX_INSNS;
5354 gen_tb_start(tb);
5356 do {
5357 tcg_gen_insn_start(dc.pc, dc.cc_op);
5358 num_insns++;
5360 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5361 status = EXIT_PC_STALE;
5362 do_debug = true;
5363 /* The address covered by the breakpoint must be included in
5364 [tb->pc, tb->pc + tb->size) in order to for it to be
5365 properly cleared -- thus we increment the PC here so that
5366 the logic setting tb->size below does the right thing. */
5367 dc.pc += 2;
5368 break;
5371 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5372 gen_io_start();
5375 status = NO_EXIT;
5376 if (status == NO_EXIT) {
5377 status = translate_one(env, &dc);
5380 /* If we reach a page boundary, are single stepping,
5381 or exhaust instruction count, stop generation. */
5382 if (status == NO_EXIT
5383 && (dc.pc >= next_page_start
5384 || tcg_op_buf_full()
5385 || num_insns >= max_insns
5386 || singlestep
5387 || cs->singlestep_enabled)) {
5388 status = EXIT_PC_STALE;
5390 } while (status == NO_EXIT);
5392 if (tb->cflags & CF_LAST_IO) {
5393 gen_io_end();
5396 switch (status) {
5397 case EXIT_GOTO_TB:
5398 case EXIT_NORETURN:
5399 break;
5400 case EXIT_PC_STALE:
5401 update_psw_addr(&dc);
5402 /* FALLTHRU */
5403 case EXIT_PC_UPDATED:
5404 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5405 cc op type is in env */
5406 update_cc_op(&dc);
5407 /* Exit the TB, either by raising a debug exception or by return. */
5408 if (do_debug) {
5409 gen_exception(EXCP_DEBUG);
5410 } else {
5411 tcg_gen_exit_tb(0);
5413 break;
5414 default:
5415 abort();
5418 gen_tb_end(tb, num_insns);
5420 tb->size = dc.pc - pc_start;
5421 tb->icount = num_insns;
5423 #if defined(S390X_DEBUG_DISAS)
5424 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5425 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5426 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5427 qemu_log("\n");
5429 #endif
5432 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5433 target_ulong *data)
5435 int cc_op = data[1];
5436 env->psw.addr = data[0];
5437 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5438 env->cc_op = cc_op;