blockdev: Keep track of monitor-owned BDS
[qemu/ar7.git] / target-s390x / translate.c
blob811928b1c3eaaae2930b0e19018d5d74ce137270
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* Debug knobs: uncomment DEBUG_INLINE_BRANCHES to gather branch statistics;
   S390X_DEBUG_DISAS_VERBOSE routes per-insn traces through qemu_log().  */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "tcg-op.h"
35 #include "qemu/log.h"
36 #include "qemu/host-utils.h"
37 #include "exec/cpu_ldst.h"
39 /* global register indexes */
40 static TCGv_ptr cpu_env;
42 #include "exec/gen-icount.h"
43 #include "exec/helper-proto.h"
44 #include "exec/helper-gen.h"
46 #include "trace-tcg.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;
54 struct DisasContext {
55 struct TranslationBlock *tb;
56 const DisasInsn *insn;
57 DisasFields *fields;
58 uint64_t pc, next_pc;
59 enum cc_op cc_op;
60 bool singlestep_enabled;
63 /* Information carried about a condition to be evaluated. */
64 typedef struct {
65 TCGCond cond:8;
66 bool is_64;
67 bool g1;
68 bool g2;
69 union {
70 struct { TCGv_i64 a, b; } s64;
71 struct { TCGv_i32 a, b; } s32;
72 } u;
73 } DisasCompare;
#define DISAS_EXCP 4

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches compiled inline vs. via the helper.  */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
82 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
84 if (!(s->tb->flags & FLAG_MASK_64)) {
85 if (s->tb->flags & FLAG_MASK_32) {
86 return pc | 0x80000000;
89 return pc;
92 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
93 int flags)
95 S390CPU *cpu = S390_CPU(cs);
96 CPUS390XState *env = &cpu->env;
97 int i;
99 if (env->cc_op > 3) {
100 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
101 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
102 } else {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
104 env->psw.mask, env->psw.addr, env->cc_op);
107 for (i = 0; i < 16; i++) {
108 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
109 if ((i % 4) == 3) {
110 cpu_fprintf(f, "\n");
111 } else {
112 cpu_fprintf(f, " ");
116 for (i = 0; i < 16; i++) {
117 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
118 if ((i % 4) == 3) {
119 cpu_fprintf(f, "\n");
120 } else {
121 cpu_fprintf(f, " ");
125 for (i = 0; i < 32; i++) {
126 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
127 env->vregs[i][0].ll, env->vregs[i][1].ll);
128 cpu_fprintf(f, (i % 2) ? "\n" : " ");
131 #ifndef CONFIG_USER_ONLY
132 for (i = 0; i < 16; i++) {
133 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
134 if ((i % 4) == 3) {
135 cpu_fprintf(f, "\n");
136 } else {
137 cpu_fprintf(f, " ");
140 #endif
142 #ifdef DEBUG_INLINE_BRANCHES
143 for (i = 0; i < CC_OP_MAX; i++) {
144 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
145 inline_branch_miss[i], inline_branch_hit[i]);
147 #endif
149 cpu_fprintf(f, "\n");
152 static TCGv_i64 psw_addr;
153 static TCGv_i64 psw_mask;
154 static TCGv_i64 gbea;
156 static TCGv_i32 cc_op;
157 static TCGv_i64 cc_src;
158 static TCGv_i64 cc_dst;
159 static TCGv_i64 cc_vr;
161 static char cpu_reg_names[32][4];
162 static TCGv_i64 regs[16];
163 static TCGv_i64 fregs[16];
165 void s390x_translate_init(void)
167 int i;
169 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
170 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
171 offsetof(CPUS390XState, psw.addr),
172 "psw_addr");
173 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
174 offsetof(CPUS390XState, psw.mask),
175 "psw_mask");
176 gbea = tcg_global_mem_new_i64(TCG_AREG0,
177 offsetof(CPUS390XState, gbea),
178 "gbea");
180 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
181 "cc_op");
182 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
183 "cc_src");
184 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
185 "cc_dst");
186 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
187 "cc_vr");
189 for (i = 0; i < 16; i++) {
190 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
191 regs[i] = tcg_global_mem_new(TCG_AREG0,
192 offsetof(CPUS390XState, regs[i]),
193 cpu_reg_names[i]);
196 for (i = 0; i < 16; i++) {
197 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
198 fregs[i] = tcg_global_mem_new(TCG_AREG0,
199 offsetof(CPUS390XState, vregs[i][0].d),
200 cpu_reg_names[i + 16]);
204 static TCGv_i64 load_reg(int reg)
206 TCGv_i64 r = tcg_temp_new_i64();
207 tcg_gen_mov_i64(r, regs[reg]);
208 return r;
211 static TCGv_i64 load_freg32_i64(int reg)
213 TCGv_i64 r = tcg_temp_new_i64();
214 tcg_gen_shri_i64(r, fregs[reg], 32);
215 return r;
218 static void store_reg(int reg, TCGv_i64 v)
220 tcg_gen_mov_i64(regs[reg], v);
223 static void store_freg(int reg, TCGv_i64 v)
225 tcg_gen_mov_i64(fregs[reg], v);
228 static void store_reg32_i64(int reg, TCGv_i64 v)
230 /* 32 bit register writes keep the upper half */
231 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
234 static void store_reg32h_i64(int reg, TCGv_i64 v)
236 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
239 static void store_freg32_i64(int reg, TCGv_i64 v)
241 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
244 static void return_low128(TCGv_i64 dest)
246 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
249 static void update_psw_addr(DisasContext *s)
251 /* psw.addr */
252 tcg_gen_movi_i64(psw_addr, s->pc);
255 static void per_branch(DisasContext *s, bool to_next)
257 #ifndef CONFIG_USER_ONLY
258 tcg_gen_movi_i64(gbea, s->pc);
260 if (s->tb->flags & FLAG_MASK_PER) {
261 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
262 gen_helper_per_branch(cpu_env, gbea, next_pc);
263 if (to_next) {
264 tcg_temp_free_i64(next_pc);
267 #endif
270 static void per_branch_cond(DisasContext *s, TCGCond cond,
271 TCGv_i64 arg1, TCGv_i64 arg2)
273 #ifndef CONFIG_USER_ONLY
274 if (s->tb->flags & FLAG_MASK_PER) {
275 TCGLabel *lab = gen_new_label();
276 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
278 tcg_gen_movi_i64(gbea, s->pc);
279 gen_helper_per_branch(cpu_env, gbea, psw_addr);
281 gen_set_label(lab);
282 } else {
283 TCGv_i64 pc = tcg_const_i64(s->pc);
284 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
285 tcg_temp_free_i64(pc);
287 #endif
290 static void per_breaking_event(DisasContext *s)
292 tcg_gen_movi_i64(gbea, s->pc);
295 static void update_cc_op(DisasContext *s)
297 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
298 tcg_gen_movi_i32(cc_op, s->cc_op);
302 static void potential_page_fault(DisasContext *s)
304 update_psw_addr(s);
305 update_cc_op(s);
308 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
310 return (uint64_t)cpu_lduw_code(env, pc);
313 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
315 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
318 static int get_mem_index(DisasContext *s)
320 switch (s->tb->flags & FLAG_MASK_ASC) {
321 case PSW_ASC_PRIMARY >> 32:
322 return 0;
323 case PSW_ASC_SECONDARY >> 32:
324 return 1;
325 case PSW_ASC_HOME >> 32:
326 return 2;
327 default:
328 tcg_abort();
329 break;
333 static void gen_exception(int excp)
335 TCGv_i32 tmp = tcg_const_i32(excp);
336 gen_helper_exception(cpu_env, tmp);
337 tcg_temp_free_i32(tmp);
340 static void gen_program_exception(DisasContext *s, int code)
342 TCGv_i32 tmp;
344 /* Remember what pgm exeption this was. */
345 tmp = tcg_const_i32(code);
346 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
347 tcg_temp_free_i32(tmp);
349 tmp = tcg_const_i32(s->next_pc - s->pc);
350 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
351 tcg_temp_free_i32(tmp);
353 /* Advance past instruction. */
354 s->pc = s->next_pc;
355 update_psw_addr(s);
357 /* Save off cc. */
358 update_cc_op(s);
360 /* Trigger exception. */
361 gen_exception(EXCP_PGM);
364 static inline void gen_illegal_opcode(DisasContext *s)
366 gen_program_exception(s, PGM_OPERATION);
369 static inline void gen_trap(DisasContext *s)
371 TCGv_i32 t;
373 /* Set DXC to 0xff. */
374 t = tcg_temp_new_i32();
375 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
376 tcg_gen_ori_i32(t, t, 0xff00);
377 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
378 tcg_temp_free_i32(t);
380 gen_program_exception(s, PGM_DATA);
383 #ifndef CONFIG_USER_ONLY
384 static void check_privileged(DisasContext *s)
386 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
387 gen_program_exception(s, PGM_PRIVILEGED);
390 #endif
392 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
394 TCGv_i64 tmp = tcg_temp_new_i64();
395 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
397 /* Note that d2 is limited to 20 bits, signed. If we crop negative
398 displacements early we create larger immedate addends. */
400 /* Note that addi optimizes the imm==0 case. */
401 if (b2 && x2) {
402 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
403 tcg_gen_addi_i64(tmp, tmp, d2);
404 } else if (b2) {
405 tcg_gen_addi_i64(tmp, regs[b2], d2);
406 } else if (x2) {
407 tcg_gen_addi_i64(tmp, regs[x2], d2);
408 } else {
409 if (need_31) {
410 d2 &= 0x7fffffff;
411 need_31 = false;
413 tcg_gen_movi_i64(tmp, d2);
415 if (need_31) {
416 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
419 return tmp;
422 static inline bool live_cc_data(DisasContext *s)
424 return (s->cc_op != CC_OP_DYNAMIC
425 && s->cc_op != CC_OP_STATIC
426 && s->cc_op > 3);
429 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
431 if (live_cc_data(s)) {
432 tcg_gen_discard_i64(cc_src);
433 tcg_gen_discard_i64(cc_dst);
434 tcg_gen_discard_i64(cc_vr);
436 s->cc_op = CC_OP_CONST0 + val;
439 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
441 if (live_cc_data(s)) {
442 tcg_gen_discard_i64(cc_src);
443 tcg_gen_discard_i64(cc_vr);
445 tcg_gen_mov_i64(cc_dst, dst);
446 s->cc_op = op;
449 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
450 TCGv_i64 dst)
452 if (live_cc_data(s)) {
453 tcg_gen_discard_i64(cc_vr);
455 tcg_gen_mov_i64(cc_src, src);
456 tcg_gen_mov_i64(cc_dst, dst);
457 s->cc_op = op;
460 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
461 TCGv_i64 dst, TCGv_i64 vr)
463 tcg_gen_mov_i64(cc_src, src);
464 tcg_gen_mov_i64(cc_dst, dst);
465 tcg_gen_mov_i64(cc_vr, vr);
466 s->cc_op = op;
469 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
471 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
474 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
476 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
479 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
481 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
484 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
486 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
489 /* CC value is in env->cc_op */
490 static void set_cc_static(DisasContext *s)
492 if (live_cc_data(s)) {
493 tcg_gen_discard_i64(cc_src);
494 tcg_gen_discard_i64(cc_dst);
495 tcg_gen_discard_i64(cc_vr);
497 s->cc_op = CC_OP_STATIC;
500 /* calculates cc into cc_op */
501 static void gen_op_calc_cc(DisasContext *s)
503 TCGv_i32 local_cc_op;
504 TCGv_i64 dummy;
506 TCGV_UNUSED_I32(local_cc_op);
507 TCGV_UNUSED_I64(dummy);
508 switch (s->cc_op) {
509 default:
510 dummy = tcg_const_i64(0);
511 /* FALLTHRU */
512 case CC_OP_ADD_64:
513 case CC_OP_ADDU_64:
514 case CC_OP_ADDC_64:
515 case CC_OP_SUB_64:
516 case CC_OP_SUBU_64:
517 case CC_OP_SUBB_64:
518 case CC_OP_ADD_32:
519 case CC_OP_ADDU_32:
520 case CC_OP_ADDC_32:
521 case CC_OP_SUB_32:
522 case CC_OP_SUBU_32:
523 case CC_OP_SUBB_32:
524 local_cc_op = tcg_const_i32(s->cc_op);
525 break;
526 case CC_OP_CONST0:
527 case CC_OP_CONST1:
528 case CC_OP_CONST2:
529 case CC_OP_CONST3:
530 case CC_OP_STATIC:
531 case CC_OP_DYNAMIC:
532 break;
535 switch (s->cc_op) {
536 case CC_OP_CONST0:
537 case CC_OP_CONST1:
538 case CC_OP_CONST2:
539 case CC_OP_CONST3:
540 /* s->cc_op is the cc value */
541 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
542 break;
543 case CC_OP_STATIC:
544 /* env->cc_op already is the cc value */
545 break;
546 case CC_OP_NZ:
547 case CC_OP_ABS_64:
548 case CC_OP_NABS_64:
549 case CC_OP_ABS_32:
550 case CC_OP_NABS_32:
551 case CC_OP_LTGT0_32:
552 case CC_OP_LTGT0_64:
553 case CC_OP_COMP_32:
554 case CC_OP_COMP_64:
555 case CC_OP_NZ_F32:
556 case CC_OP_NZ_F64:
557 case CC_OP_FLOGR:
558 /* 1 argument */
559 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
560 break;
561 case CC_OP_ICM:
562 case CC_OP_LTGT_32:
563 case CC_OP_LTGT_64:
564 case CC_OP_LTUGTU_32:
565 case CC_OP_LTUGTU_64:
566 case CC_OP_TM_32:
567 case CC_OP_TM_64:
568 case CC_OP_SLA_32:
569 case CC_OP_SLA_64:
570 case CC_OP_NZ_F128:
571 /* 2 arguments */
572 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
573 break;
574 case CC_OP_ADD_64:
575 case CC_OP_ADDU_64:
576 case CC_OP_ADDC_64:
577 case CC_OP_SUB_64:
578 case CC_OP_SUBU_64:
579 case CC_OP_SUBB_64:
580 case CC_OP_ADD_32:
581 case CC_OP_ADDU_32:
582 case CC_OP_ADDC_32:
583 case CC_OP_SUB_32:
584 case CC_OP_SUBU_32:
585 case CC_OP_SUBB_32:
586 /* 3 arguments */
587 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
588 break;
589 case CC_OP_DYNAMIC:
590 /* unknown operation - assume 3 arguments and cc_op in env */
591 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
592 break;
593 default:
594 tcg_abort();
597 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
598 tcg_temp_free_i32(local_cc_op);
600 if (!TCGV_IS_UNUSED_I64(dummy)) {
601 tcg_temp_free_i64(dummy);
604 /* We now have cc in cc_op as constant */
605 set_cc_static(s);
608 static int use_goto_tb(DisasContext *s, uint64_t dest)
610 /* NOTE: we handle the case where the TB spans two pages here */
611 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
612 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
613 && !s->singlestep_enabled
614 && !(s->tb->cflags & CF_LAST_IO)
615 && !(s->tb->flags & FLAG_MASK_PER));
618 static void account_noninline_branch(DisasContext *s, int cc_op)
620 #ifdef DEBUG_INLINE_BRANCHES
621 inline_branch_miss[cc_op]++;
622 #endif
625 static void account_inline_branch(DisasContext *s, int cc_op)
627 #ifdef DEBUG_INLINE_BRANCHES
628 inline_branch_hit[cc_op]++;
629 #endif
632 /* Table of mask values to comparison codes, given a comparison as input.
633 For such, CC=3 should not be possible. */
634 static const TCGCond ltgt_cond[16] = {
635 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
636 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
637 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
638 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
639 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
640 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
641 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
642 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
645 /* Table of mask values to comparison codes, given a logic op as input.
646 For such, only CC=0 and CC=1 should be possible. */
647 static const TCGCond nz_cond[16] = {
648 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
649 TCG_COND_NEVER, TCG_COND_NEVER,
650 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
651 TCG_COND_NE, TCG_COND_NE,
652 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
653 TCG_COND_EQ, TCG_COND_EQ,
654 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
655 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
658 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
659 details required to generate a TCG comparison. */
660 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
662 TCGCond cond;
663 enum cc_op old_cc_op = s->cc_op;
665 if (mask == 15 || mask == 0) {
666 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
667 c->u.s32.a = cc_op;
668 c->u.s32.b = cc_op;
669 c->g1 = c->g2 = true;
670 c->is_64 = false;
671 return;
674 /* Find the TCG condition for the mask + cc op. */
675 switch (old_cc_op) {
676 case CC_OP_LTGT0_32:
677 case CC_OP_LTGT0_64:
678 case CC_OP_LTGT_32:
679 case CC_OP_LTGT_64:
680 cond = ltgt_cond[mask];
681 if (cond == TCG_COND_NEVER) {
682 goto do_dynamic;
684 account_inline_branch(s, old_cc_op);
685 break;
687 case CC_OP_LTUGTU_32:
688 case CC_OP_LTUGTU_64:
689 cond = tcg_unsigned_cond(ltgt_cond[mask]);
690 if (cond == TCG_COND_NEVER) {
691 goto do_dynamic;
693 account_inline_branch(s, old_cc_op);
694 break;
696 case CC_OP_NZ:
697 cond = nz_cond[mask];
698 if (cond == TCG_COND_NEVER) {
699 goto do_dynamic;
701 account_inline_branch(s, old_cc_op);
702 break;
704 case CC_OP_TM_32:
705 case CC_OP_TM_64:
706 switch (mask) {
707 case 8:
708 cond = TCG_COND_EQ;
709 break;
710 case 4 | 2 | 1:
711 cond = TCG_COND_NE;
712 break;
713 default:
714 goto do_dynamic;
716 account_inline_branch(s, old_cc_op);
717 break;
719 case CC_OP_ICM:
720 switch (mask) {
721 case 8:
722 cond = TCG_COND_EQ;
723 break;
724 case 4 | 2 | 1:
725 case 4 | 2:
726 cond = TCG_COND_NE;
727 break;
728 default:
729 goto do_dynamic;
731 account_inline_branch(s, old_cc_op);
732 break;
734 case CC_OP_FLOGR:
735 switch (mask & 0xa) {
736 case 8: /* src == 0 -> no one bit found */
737 cond = TCG_COND_EQ;
738 break;
739 case 2: /* src != 0 -> one bit found */
740 cond = TCG_COND_NE;
741 break;
742 default:
743 goto do_dynamic;
745 account_inline_branch(s, old_cc_op);
746 break;
748 case CC_OP_ADDU_32:
749 case CC_OP_ADDU_64:
750 switch (mask) {
751 case 8 | 2: /* vr == 0 */
752 cond = TCG_COND_EQ;
753 break;
754 case 4 | 1: /* vr != 0 */
755 cond = TCG_COND_NE;
756 break;
757 case 8 | 4: /* no carry -> vr >= src */
758 cond = TCG_COND_GEU;
759 break;
760 case 2 | 1: /* carry -> vr < src */
761 cond = TCG_COND_LTU;
762 break;
763 default:
764 goto do_dynamic;
766 account_inline_branch(s, old_cc_op);
767 break;
769 case CC_OP_SUBU_32:
770 case CC_OP_SUBU_64:
771 /* Note that CC=0 is impossible; treat it as dont-care. */
772 switch (mask & 7) {
773 case 2: /* zero -> op1 == op2 */
774 cond = TCG_COND_EQ;
775 break;
776 case 4 | 1: /* !zero -> op1 != op2 */
777 cond = TCG_COND_NE;
778 break;
779 case 4: /* borrow (!carry) -> op1 < op2 */
780 cond = TCG_COND_LTU;
781 break;
782 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
783 cond = TCG_COND_GEU;
784 break;
785 default:
786 goto do_dynamic;
788 account_inline_branch(s, old_cc_op);
789 break;
791 default:
792 do_dynamic:
793 /* Calculate cc value. */
794 gen_op_calc_cc(s);
795 /* FALLTHRU */
797 case CC_OP_STATIC:
798 /* Jump based on CC. We'll load up the real cond below;
799 the assignment here merely avoids a compiler warning. */
800 account_noninline_branch(s, old_cc_op);
801 old_cc_op = CC_OP_STATIC;
802 cond = TCG_COND_NEVER;
803 break;
806 /* Load up the arguments of the comparison. */
807 c->is_64 = true;
808 c->g1 = c->g2 = false;
809 switch (old_cc_op) {
810 case CC_OP_LTGT0_32:
811 c->is_64 = false;
812 c->u.s32.a = tcg_temp_new_i32();
813 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
814 c->u.s32.b = tcg_const_i32(0);
815 break;
816 case CC_OP_LTGT_32:
817 case CC_OP_LTUGTU_32:
818 case CC_OP_SUBU_32:
819 c->is_64 = false;
820 c->u.s32.a = tcg_temp_new_i32();
821 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
822 c->u.s32.b = tcg_temp_new_i32();
823 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
824 break;
826 case CC_OP_LTGT0_64:
827 case CC_OP_NZ:
828 case CC_OP_FLOGR:
829 c->u.s64.a = cc_dst;
830 c->u.s64.b = tcg_const_i64(0);
831 c->g1 = true;
832 break;
833 case CC_OP_LTGT_64:
834 case CC_OP_LTUGTU_64:
835 case CC_OP_SUBU_64:
836 c->u.s64.a = cc_src;
837 c->u.s64.b = cc_dst;
838 c->g1 = c->g2 = true;
839 break;
841 case CC_OP_TM_32:
842 case CC_OP_TM_64:
843 case CC_OP_ICM:
844 c->u.s64.a = tcg_temp_new_i64();
845 c->u.s64.b = tcg_const_i64(0);
846 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
847 break;
849 case CC_OP_ADDU_32:
850 c->is_64 = false;
851 c->u.s32.a = tcg_temp_new_i32();
852 c->u.s32.b = tcg_temp_new_i32();
853 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
854 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
855 tcg_gen_movi_i32(c->u.s32.b, 0);
856 } else {
857 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
859 break;
861 case CC_OP_ADDU_64:
862 c->u.s64.a = cc_vr;
863 c->g1 = true;
864 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
865 c->u.s64.b = tcg_const_i64(0);
866 } else {
867 c->u.s64.b = cc_src;
868 c->g2 = true;
870 break;
872 case CC_OP_STATIC:
873 c->is_64 = false;
874 c->u.s32.a = cc_op;
875 c->g1 = true;
876 switch (mask) {
877 case 0x8 | 0x4 | 0x2: /* cc != 3 */
878 cond = TCG_COND_NE;
879 c->u.s32.b = tcg_const_i32(3);
880 break;
881 case 0x8 | 0x4 | 0x1: /* cc != 2 */
882 cond = TCG_COND_NE;
883 c->u.s32.b = tcg_const_i32(2);
884 break;
885 case 0x8 | 0x2 | 0x1: /* cc != 1 */
886 cond = TCG_COND_NE;
887 c->u.s32.b = tcg_const_i32(1);
888 break;
889 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
890 cond = TCG_COND_EQ;
891 c->g1 = false;
892 c->u.s32.a = tcg_temp_new_i32();
893 c->u.s32.b = tcg_const_i32(0);
894 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
895 break;
896 case 0x8 | 0x4: /* cc < 2 */
897 cond = TCG_COND_LTU;
898 c->u.s32.b = tcg_const_i32(2);
899 break;
900 case 0x8: /* cc == 0 */
901 cond = TCG_COND_EQ;
902 c->u.s32.b = tcg_const_i32(0);
903 break;
904 case 0x4 | 0x2 | 0x1: /* cc != 0 */
905 cond = TCG_COND_NE;
906 c->u.s32.b = tcg_const_i32(0);
907 break;
908 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
909 cond = TCG_COND_NE;
910 c->g1 = false;
911 c->u.s32.a = tcg_temp_new_i32();
912 c->u.s32.b = tcg_const_i32(0);
913 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
914 break;
915 case 0x4: /* cc == 1 */
916 cond = TCG_COND_EQ;
917 c->u.s32.b = tcg_const_i32(1);
918 break;
919 case 0x2 | 0x1: /* cc > 1 */
920 cond = TCG_COND_GTU;
921 c->u.s32.b = tcg_const_i32(1);
922 break;
923 case 0x2: /* cc == 2 */
924 cond = TCG_COND_EQ;
925 c->u.s32.b = tcg_const_i32(2);
926 break;
927 case 0x1: /* cc == 3 */
928 cond = TCG_COND_EQ;
929 c->u.s32.b = tcg_const_i32(3);
930 break;
931 default:
932 /* CC is masked by something else: (8 >> cc) & mask. */
933 cond = TCG_COND_NE;
934 c->g1 = false;
935 c->u.s32.a = tcg_const_i32(8);
936 c->u.s32.b = tcg_const_i32(0);
937 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
938 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
939 break;
941 break;
943 default:
944 abort();
946 c->cond = cond;
949 static void free_compare(DisasCompare *c)
951 if (!c->g1) {
952 if (c->is_64) {
953 tcg_temp_free_i64(c->u.s64.a);
954 } else {
955 tcg_temp_free_i32(c->u.s32.a);
958 if (!c->g2) {
959 if (c->is_64) {
960 tcg_temp_free_i64(c->u.s64.b);
961 } else {
962 tcg_temp_free_i32(c->u.s32.b);
967 /* ====================================================================== */
968 /* Define the insn format enumeration. */
969 #define F0(N) FMT_##N,
970 #define F1(N, X1) F0(N)
971 #define F2(N, X1, X2) F0(N)
972 #define F3(N, X1, X2, X3) F0(N)
973 #define F4(N, X1, X2, X3, X4) F0(N)
974 #define F5(N, X1, X2, X3, X4, X5) F0(N)
976 typedef enum {
977 #include "insn-format.def"
978 } DisasFormat;
980 #undef F0
981 #undef F1
982 #undef F2
983 #undef F3
984 #undef F4
985 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact indices: fields that never coexist share the same slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1047 struct DisasFields {
1048 uint64_t raw_insn;
1049 unsigned op:8;
1050 unsigned op2:8;
1051 unsigned presentC:16;
1052 unsigned int presentO;
1053 int c[NUM_C_FIELD];
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
1060 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1062 return (f->presentO >> c) & 1;
1065 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1066 enum DisasFieldIndexC c)
1068 assert(have_field1(f, o));
1069 return f->c[c];
1072 /* Describe the layout of each field in each format. */
1073 typedef struct DisasField {
1074 unsigned int beg:8;
1075 unsigned int size:8;
1076 unsigned int type:2;
1077 unsigned int indexC:6;
1078 enum DisasFieldIndexO indexO:8;
1079 } DisasField;
1081 typedef struct DisasFormatInfo {
1082 DisasField op[NUM_C_FIELD];
1083 } DisasFormatInfo;
1085 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1086 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1087 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1088 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1089 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1090 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1091 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1092 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1093 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1094 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1095 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1096 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1097 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1098 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1100 #define F0(N) { { } },
1101 #define F1(N, X1) { { X1 } },
1102 #define F2(N, X1, X2) { { X1, X2 } },
1103 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1104 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1105 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1107 static const DisasFormatInfo format_info[] = {
1108 #include "insn-format.def"
1111 #undef F0
1112 #undef F1
1113 #undef F2
1114 #undef F3
1115 #undef F4
1116 #undef F5
1117 #undef R
1118 #undef M
1119 #undef BD
1120 #undef BXD
1121 #undef BDL
1122 #undef BXDL
1123 #undef I
1124 #undef L
1126 /* Generally, we'll extract operands into this structures, operate upon
1127 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1128 of routines below for more details. */
1129 typedef struct {
1130 bool g_out, g_out2, g_in1, g_in2;
1131 TCGv_i64 out, out2, in1, in2;
1132 TCGv_i64 addr1;
1133 } DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even 1
#define SPEC_r2_even 2
#define SPEC_r3_even 4
#define SPEC_r1_f128 8
#define SPEC_r2_f128 16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* Architectural facility an instruction belongs to.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
1191 struct DisasInsn {
1192 unsigned opc:16;
1193 DisasFormat fmt:8;
1194 DisasFacility fac:8;
1195 unsigned spec:8;
1197 const char *name;
1199 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1200 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1201 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1202 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1203 void (*help_cout)(DisasContext *, DisasOps *);
1204 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1206 uint64_t data;
1209 /* ====================================================================== */
1210 /* Miscellaneous helpers, used by several operations. */
1212 static void help_l2_shift(DisasContext *s, DisasFields *f,
1213 DisasOps *o, int mask)
1215 int b2 = get_field(f, b2);
1216 int d2 = get_field(f, d2);
1218 if (b2 == 0) {
1219 o->in2 = tcg_const_i64(d2 & mask);
1220 } else {
1221 o->in2 = get_address(s, 0, b2, d2);
1222 tcg_gen_andi_i64(o->in2, o->in2, mask);
1226 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1228 if (dest == s->next_pc) {
1229 per_branch(s, true);
1230 return NO_EXIT;
1232 if (use_goto_tb(s, dest)) {
1233 update_cc_op(s);
1234 per_breaking_event(s);
1235 tcg_gen_goto_tb(0);
1236 tcg_gen_movi_i64(psw_addr, dest);
1237 tcg_gen_exit_tb((uintptr_t)s->tb);
1238 return EXIT_GOTO_TB;
1239 } else {
1240 tcg_gen_movi_i64(psw_addr, dest);
1241 per_branch(s, false);
1242 return EXIT_PC_UPDATED;
1246 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1247 bool is_imm, int imm, TCGv_i64 cdest)
1249 ExitStatus ret;
1250 uint64_t dest = s->pc + 2 * imm;
1251 TCGLabel *lab;
1253 /* Take care of the special cases first. */
1254 if (c->cond == TCG_COND_NEVER) {
1255 ret = NO_EXIT;
1256 goto egress;
1258 if (is_imm) {
1259 if (dest == s->next_pc) {
1260 /* Branch to next. */
1261 per_branch(s, true);
1262 ret = NO_EXIT;
1263 goto egress;
1265 if (c->cond == TCG_COND_ALWAYS) {
1266 ret = help_goto_direct(s, dest);
1267 goto egress;
1269 } else {
1270 if (TCGV_IS_UNUSED_I64(cdest)) {
1271 /* E.g. bcr %r0 -> no branch. */
1272 ret = NO_EXIT;
1273 goto egress;
1275 if (c->cond == TCG_COND_ALWAYS) {
1276 tcg_gen_mov_i64(psw_addr, cdest);
1277 per_branch(s, false);
1278 ret = EXIT_PC_UPDATED;
1279 goto egress;
1283 if (use_goto_tb(s, s->next_pc)) {
1284 if (is_imm && use_goto_tb(s, dest)) {
1285 /* Both exits can use goto_tb. */
1286 update_cc_op(s);
1288 lab = gen_new_label();
1289 if (c->is_64) {
1290 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1291 } else {
1292 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1295 /* Branch not taken. */
1296 tcg_gen_goto_tb(0);
1297 tcg_gen_movi_i64(psw_addr, s->next_pc);
1298 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1300 /* Branch taken. */
1301 gen_set_label(lab);
1302 per_breaking_event(s);
1303 tcg_gen_goto_tb(1);
1304 tcg_gen_movi_i64(psw_addr, dest);
1305 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1307 ret = EXIT_GOTO_TB;
1308 } else {
1309 /* Fallthru can use goto_tb, but taken branch cannot. */
1310 /* Store taken branch destination before the brcond. This
1311 avoids having to allocate a new local temp to hold it.
1312 We'll overwrite this in the not taken case anyway. */
1313 if (!is_imm) {
1314 tcg_gen_mov_i64(psw_addr, cdest);
1317 lab = gen_new_label();
1318 if (c->is_64) {
1319 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1320 } else {
1321 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1324 /* Branch not taken. */
1325 update_cc_op(s);
1326 tcg_gen_goto_tb(0);
1327 tcg_gen_movi_i64(psw_addr, s->next_pc);
1328 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1330 gen_set_label(lab);
1331 if (is_imm) {
1332 tcg_gen_movi_i64(psw_addr, dest);
1334 per_breaking_event(s);
1335 ret = EXIT_PC_UPDATED;
1337 } else {
1338 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1339 Most commonly we're single-stepping or some other condition that
1340 disables all use of goto_tb. Just update the PC and exit. */
1342 TCGv_i64 next = tcg_const_i64(s->next_pc);
1343 if (is_imm) {
1344 cdest = tcg_const_i64(dest);
1347 if (c->is_64) {
1348 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1349 cdest, next);
1350 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1351 } else {
1352 TCGv_i32 t0 = tcg_temp_new_i32();
1353 TCGv_i64 t1 = tcg_temp_new_i64();
1354 TCGv_i64 z = tcg_const_i64(0);
1355 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1356 tcg_gen_extu_i32_i64(t1, t0);
1357 tcg_temp_free_i32(t0);
1358 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1359 per_branch_cond(s, TCG_COND_NE, t1, z);
1360 tcg_temp_free_i64(t1);
1361 tcg_temp_free_i64(z);
1364 if (is_imm) {
1365 tcg_temp_free_i64(cdest);
1367 tcg_temp_free_i64(next);
1369 ret = EXIT_PC_UPDATED;
1372 egress:
1373 free_compare(c);
1374 return ret;
1377 /* ====================================================================== */
1378 /* The operations. These perform the bulk of the work for any insn,
1379 usually after the operands have been loaded and output initialized. */
1381 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1383 TCGv_i64 z, n;
1384 z = tcg_const_i64(0);
1385 n = tcg_temp_new_i64();
1386 tcg_gen_neg_i64(n, o->in2);
1387 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1388 tcg_temp_free_i64(n);
1389 tcg_temp_free_i64(z);
1390 return NO_EXIT;
1393 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1395 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1396 return NO_EXIT;
1399 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1401 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1402 return NO_EXIT;
1405 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1407 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1408 tcg_gen_mov_i64(o->out2, o->in2);
1409 return NO_EXIT;
1412 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1414 tcg_gen_add_i64(o->out, o->in1, o->in2);
1415 return NO_EXIT;
1418 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1420 DisasCompare cmp;
1421 TCGv_i64 carry;
1423 tcg_gen_add_i64(o->out, o->in1, o->in2);
1425 /* The carry flag is the msb of CC, therefore the branch mask that would
1426 create that comparison is 3. Feeding the generated comparison to
1427 setcond produces the carry flag that we desire. */
1428 disas_jcc(s, &cmp, 3);
1429 carry = tcg_temp_new_i64();
1430 if (cmp.is_64) {
1431 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1432 } else {
1433 TCGv_i32 t = tcg_temp_new_i32();
1434 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1435 tcg_gen_extu_i32_i64(carry, t);
1436 tcg_temp_free_i32(t);
1438 free_compare(&cmp);
1440 tcg_gen_add_i64(o->out, o->out, carry);
1441 tcg_temp_free_i64(carry);
1442 return NO_EXIT;
1445 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1447 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1448 return NO_EXIT;
1451 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1453 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1454 return NO_EXIT;
1457 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1459 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1460 return_low128(o->out2);
1461 return NO_EXIT;
1464 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1466 tcg_gen_and_i64(o->out, o->in1, o->in2);
1467 return NO_EXIT;
1470 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1472 int shift = s->insn->data & 0xff;
1473 int size = s->insn->data >> 8;
1474 uint64_t mask = ((1ull << size) - 1) << shift;
1476 assert(!o->g_in2);
1477 tcg_gen_shli_i64(o->in2, o->in2, shift);
1478 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1479 tcg_gen_and_i64(o->out, o->in1, o->in2);
1481 /* Produce the CC from only the bits manipulated. */
1482 tcg_gen_andi_i64(cc_dst, o->out, mask);
1483 set_cc_nz_u64(s, cc_dst);
1484 return NO_EXIT;
1487 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1489 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1490 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1491 tcg_gen_mov_i64(psw_addr, o->in2);
1492 per_branch(s, false);
1493 return EXIT_PC_UPDATED;
1494 } else {
1495 return NO_EXIT;
1499 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1501 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1502 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1505 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1507 int m1 = get_field(s->fields, m1);
1508 bool is_imm = have_field(s->fields, i2);
1509 int imm = is_imm ? get_field(s->fields, i2) : 0;
1510 DisasCompare c;
1512 disas_jcc(s, &c, m1);
1513 return help_branch(s, &c, is_imm, imm, o->in2);
1516 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1518 int r1 = get_field(s->fields, r1);
1519 bool is_imm = have_field(s->fields, i2);
1520 int imm = is_imm ? get_field(s->fields, i2) : 0;
1521 DisasCompare c;
1522 TCGv_i64 t;
1524 c.cond = TCG_COND_NE;
1525 c.is_64 = false;
1526 c.g1 = false;
1527 c.g2 = false;
1529 t = tcg_temp_new_i64();
1530 tcg_gen_subi_i64(t, regs[r1], 1);
1531 store_reg32_i64(r1, t);
1532 c.u.s32.a = tcg_temp_new_i32();
1533 c.u.s32.b = tcg_const_i32(0);
1534 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1535 tcg_temp_free_i64(t);
1537 return help_branch(s, &c, is_imm, imm, o->in2);
1540 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1542 int r1 = get_field(s->fields, r1);
1543 int imm = get_field(s->fields, i2);
1544 DisasCompare c;
1545 TCGv_i64 t;
1547 c.cond = TCG_COND_NE;
1548 c.is_64 = false;
1549 c.g1 = false;
1550 c.g2 = false;
1552 t = tcg_temp_new_i64();
1553 tcg_gen_shri_i64(t, regs[r1], 32);
1554 tcg_gen_subi_i64(t, t, 1);
1555 store_reg32h_i64(r1, t);
1556 c.u.s32.a = tcg_temp_new_i32();
1557 c.u.s32.b = tcg_const_i32(0);
1558 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1559 tcg_temp_free_i64(t);
1561 return help_branch(s, &c, 1, imm, o->in2);
1564 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1566 int r1 = get_field(s->fields, r1);
1567 bool is_imm = have_field(s->fields, i2);
1568 int imm = is_imm ? get_field(s->fields, i2) : 0;
1569 DisasCompare c;
1571 c.cond = TCG_COND_NE;
1572 c.is_64 = true;
1573 c.g1 = true;
1574 c.g2 = false;
1576 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1577 c.u.s64.a = regs[r1];
1578 c.u.s64.b = tcg_const_i64(0);
1580 return help_branch(s, &c, is_imm, imm, o->in2);
1583 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1585 int r1 = get_field(s->fields, r1);
1586 int r3 = get_field(s->fields, r3);
1587 bool is_imm = have_field(s->fields, i2);
1588 int imm = is_imm ? get_field(s->fields, i2) : 0;
1589 DisasCompare c;
1590 TCGv_i64 t;
1592 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1593 c.is_64 = false;
1594 c.g1 = false;
1595 c.g2 = false;
1597 t = tcg_temp_new_i64();
1598 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1599 c.u.s32.a = tcg_temp_new_i32();
1600 c.u.s32.b = tcg_temp_new_i32();
1601 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1602 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1603 store_reg32_i64(r1, t);
1604 tcg_temp_free_i64(t);
1606 return help_branch(s, &c, is_imm, imm, o->in2);
1609 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1611 int r1 = get_field(s->fields, r1);
1612 int r3 = get_field(s->fields, r3);
1613 bool is_imm = have_field(s->fields, i2);
1614 int imm = is_imm ? get_field(s->fields, i2) : 0;
1615 DisasCompare c;
1617 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1618 c.is_64 = true;
1620 if (r1 == (r3 | 1)) {
1621 c.u.s64.b = load_reg(r3 | 1);
1622 c.g2 = false;
1623 } else {
1624 c.u.s64.b = regs[r3 | 1];
1625 c.g2 = true;
1628 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1629 c.u.s64.a = regs[r1];
1630 c.g1 = true;
1632 return help_branch(s, &c, is_imm, imm, o->in2);
1635 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1637 int imm, m3 = get_field(s->fields, m3);
1638 bool is_imm;
1639 DisasCompare c;
1641 c.cond = ltgt_cond[m3];
1642 if (s->insn->data) {
1643 c.cond = tcg_unsigned_cond(c.cond);
1645 c.is_64 = c.g1 = c.g2 = true;
1646 c.u.s64.a = o->in1;
1647 c.u.s64.b = o->in2;
1649 is_imm = have_field(s->fields, i4);
1650 if (is_imm) {
1651 imm = get_field(s->fields, i4);
1652 } else {
1653 imm = 0;
1654 o->out = get_address(s, 0, get_field(s->fields, b4),
1655 get_field(s->fields, d4));
1658 return help_branch(s, &c, is_imm, imm, o->out);
1661 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1663 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1664 set_cc_static(s);
1665 return NO_EXIT;
1668 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1670 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1671 set_cc_static(s);
1672 return NO_EXIT;
1675 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1677 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1678 set_cc_static(s);
1679 return NO_EXIT;
1682 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1684 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1685 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1686 tcg_temp_free_i32(m3);
1687 gen_set_cc_nz_f32(s, o->in2);
1688 return NO_EXIT;
1691 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1693 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1694 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1695 tcg_temp_free_i32(m3);
1696 gen_set_cc_nz_f64(s, o->in2);
1697 return NO_EXIT;
1700 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1702 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1703 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1704 tcg_temp_free_i32(m3);
1705 gen_set_cc_nz_f128(s, o->in1, o->in2);
1706 return NO_EXIT;
1709 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1711 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1712 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1713 tcg_temp_free_i32(m3);
1714 gen_set_cc_nz_f32(s, o->in2);
1715 return NO_EXIT;
1718 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1720 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1721 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1722 tcg_temp_free_i32(m3);
1723 gen_set_cc_nz_f64(s, o->in2);
1724 return NO_EXIT;
1727 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1729 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1730 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1731 tcg_temp_free_i32(m3);
1732 gen_set_cc_nz_f128(s, o->in1, o->in2);
1733 return NO_EXIT;
1736 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1738 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1739 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1740 tcg_temp_free_i32(m3);
1741 gen_set_cc_nz_f32(s, o->in2);
1742 return NO_EXIT;
1745 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1747 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1748 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1749 tcg_temp_free_i32(m3);
1750 gen_set_cc_nz_f64(s, o->in2);
1751 return NO_EXIT;
1754 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1756 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1757 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1758 tcg_temp_free_i32(m3);
1759 gen_set_cc_nz_f128(s, o->in1, o->in2);
1760 return NO_EXIT;
1763 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1765 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1766 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1767 tcg_temp_free_i32(m3);
1768 gen_set_cc_nz_f32(s, o->in2);
1769 return NO_EXIT;
1772 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1774 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1775 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1776 tcg_temp_free_i32(m3);
1777 gen_set_cc_nz_f64(s, o->in2);
1778 return NO_EXIT;
1781 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1783 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1784 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1785 tcg_temp_free_i32(m3);
1786 gen_set_cc_nz_f128(s, o->in1, o->in2);
1787 return NO_EXIT;
1790 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1792 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1793 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1794 tcg_temp_free_i32(m3);
1795 return NO_EXIT;
1798 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1800 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1801 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1802 tcg_temp_free_i32(m3);
1803 return NO_EXIT;
1806 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1808 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1809 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1810 tcg_temp_free_i32(m3);
1811 return_low128(o->out2);
1812 return NO_EXIT;
1815 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1817 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1818 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1819 tcg_temp_free_i32(m3);
1820 return NO_EXIT;
1823 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1825 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1826 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1827 tcg_temp_free_i32(m3);
1828 return NO_EXIT;
1831 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1833 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1834 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1835 tcg_temp_free_i32(m3);
1836 return_low128(o->out2);
1837 return NO_EXIT;
1840 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1842 int r2 = get_field(s->fields, r2);
1843 TCGv_i64 len = tcg_temp_new_i64();
1845 potential_page_fault(s);
1846 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1847 set_cc_static(s);
1848 return_low128(o->out);
1850 tcg_gen_add_i64(regs[r2], regs[r2], len);
1851 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1852 tcg_temp_free_i64(len);
1854 return NO_EXIT;
1857 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1859 int l = get_field(s->fields, l1);
1860 TCGv_i32 vl;
1862 switch (l + 1) {
1863 case 1:
1864 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1865 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1866 break;
1867 case 2:
1868 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1869 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1870 break;
1871 case 4:
1872 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1873 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1874 break;
1875 case 8:
1876 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1877 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1878 break;
1879 default:
1880 potential_page_fault(s);
1881 vl = tcg_const_i32(l);
1882 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1883 tcg_temp_free_i32(vl);
1884 set_cc_static(s);
1885 return NO_EXIT;
1887 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1888 return NO_EXIT;
1891 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1893 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1894 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1895 potential_page_fault(s);
1896 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1897 tcg_temp_free_i32(r1);
1898 tcg_temp_free_i32(r3);
1899 set_cc_static(s);
1900 return NO_EXIT;
1903 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1905 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1906 TCGv_i32 t1 = tcg_temp_new_i32();
1907 tcg_gen_extrl_i64_i32(t1, o->in1);
1908 potential_page_fault(s);
1909 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1910 set_cc_static(s);
1911 tcg_temp_free_i32(t1);
1912 tcg_temp_free_i32(m3);
1913 return NO_EXIT;
1916 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1918 potential_page_fault(s);
1919 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1920 set_cc_static(s);
1921 return_low128(o->in2);
1922 return NO_EXIT;
1925 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1927 TCGv_i64 t = tcg_temp_new_i64();
1928 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1929 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1930 tcg_gen_or_i64(o->out, o->out, t);
1931 tcg_temp_free_i64(t);
1932 return NO_EXIT;
1935 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1937 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1938 int d2 = get_field(s->fields, d2);
1939 int b2 = get_field(s->fields, b2);
1940 int is_64 = s->insn->data;
1941 TCGv_i64 addr, mem, cc, z;
1943 /* Note that in1 = R3 (new value) and
1944 in2 = (zero-extended) R1 (expected value). */
1946 /* Load the memory into the (temporary) output. While the PoO only talks
1947 about moving the memory to R1 on inequality, if we include equality it
1948 means that R1 is equal to the memory in all conditions. */
1949 addr = get_address(s, 0, b2, d2);
1950 if (is_64) {
1951 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1952 } else {
1953 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1956 /* Are the memory and expected values (un)equal? Note that this setcond
1957 produces the output CC value, thus the NE sense of the test. */
1958 cc = tcg_temp_new_i64();
1959 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1961 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1962 Recall that we are allowed to unconditionally issue the store (and
1963 thus any possible write trap), so (re-)store the original contents
1964 of MEM in case of inequality. */
1965 z = tcg_const_i64(0);
1966 mem = tcg_temp_new_i64();
1967 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1968 if (is_64) {
1969 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1970 } else {
1971 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1973 tcg_temp_free_i64(z);
1974 tcg_temp_free_i64(mem);
1975 tcg_temp_free_i64(addr);
1977 /* Store CC back to cc_op. Wait until after the store so that any
1978 exception gets the old cc_op value. */
1979 tcg_gen_extrl_i64_i32(cc_op, cc);
1980 tcg_temp_free_i64(cc);
1981 set_cc_static(s);
1982 return NO_EXIT;
1985 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1987 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1988 int r1 = get_field(s->fields, r1);
1989 int r3 = get_field(s->fields, r3);
1990 int d2 = get_field(s->fields, d2);
1991 int b2 = get_field(s->fields, b2);
1992 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1994 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1996 addrh = get_address(s, 0, b2, d2);
1997 addrl = get_address(s, 0, b2, d2 + 8);
1998 outh = tcg_temp_new_i64();
1999 outl = tcg_temp_new_i64();
2001 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
2002 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
2004 /* Fold the double-word compare with arithmetic. */
2005 cc = tcg_temp_new_i64();
2006 z = tcg_temp_new_i64();
2007 tcg_gen_xor_i64(cc, outh, regs[r1]);
2008 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
2009 tcg_gen_or_i64(cc, cc, z);
2010 tcg_gen_movi_i64(z, 0);
2011 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
2013 memh = tcg_temp_new_i64();
2014 meml = tcg_temp_new_i64();
2015 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
2016 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
2017 tcg_temp_free_i64(z);
2019 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
2020 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
2021 tcg_temp_free_i64(memh);
2022 tcg_temp_free_i64(meml);
2023 tcg_temp_free_i64(addrh);
2024 tcg_temp_free_i64(addrl);
2026 /* Save back state now that we've passed all exceptions. */
2027 tcg_gen_mov_i64(regs[r1], outh);
2028 tcg_gen_mov_i64(regs[r1 + 1], outl);
2029 tcg_gen_extrl_i64_i32(cc_op, cc);
2030 tcg_temp_free_i64(outh);
2031 tcg_temp_free_i64(outl);
2032 tcg_temp_free_i64(cc);
2033 set_cc_static(s);
2034 return NO_EXIT;
2037 #ifndef CONFIG_USER_ONLY
2038 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2040 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2041 check_privileged(s);
2042 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
2043 tcg_temp_free_i32(r1);
2044 set_cc_static(s);
2045 return NO_EXIT;
2047 #endif
2049 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2051 TCGv_i64 t1 = tcg_temp_new_i64();
2052 TCGv_i32 t2 = tcg_temp_new_i32();
2053 tcg_gen_extrl_i64_i32(t2, o->in1);
2054 gen_helper_cvd(t1, t2);
2055 tcg_temp_free_i32(t2);
2056 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2057 tcg_temp_free_i64(t1);
2058 return NO_EXIT;
2061 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2063 int m3 = get_field(s->fields, m3);
2064 TCGLabel *lab = gen_new_label();
2065 TCGCond c;
2067 c = tcg_invert_cond(ltgt_cond[m3]);
2068 if (s->insn->data) {
2069 c = tcg_unsigned_cond(c);
2071 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2073 /* Trap. */
2074 gen_trap(s);
2076 gen_set_label(lab);
2077 return NO_EXIT;
2080 #ifndef CONFIG_USER_ONLY
2081 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2083 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2084 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2085 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2087 check_privileged(s);
2088 update_psw_addr(s);
2089 gen_op_calc_cc(s);
2091 gen_helper_diag(cpu_env, r1, r3, func_code);
2093 tcg_temp_free_i32(func_code);
2094 tcg_temp_free_i32(r3);
2095 tcg_temp_free_i32(r1);
2096 return NO_EXIT;
2098 #endif
2100 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2102 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2103 return_low128(o->out);
2104 return NO_EXIT;
2107 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2109 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2110 return_low128(o->out);
2111 return NO_EXIT;
2114 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2116 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2117 return_low128(o->out);
2118 return NO_EXIT;
2121 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2123 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2124 return_low128(o->out);
2125 return NO_EXIT;
2128 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2130 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2131 return NO_EXIT;
2134 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2136 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2137 return NO_EXIT;
2140 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2142 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2143 return_low128(o->out2);
2144 return NO_EXIT;
2147 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2149 int r2 = get_field(s->fields, r2);
2150 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2151 return NO_EXIT;
2154 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2156 /* No cache information provided. */
2157 tcg_gen_movi_i64(o->out, -1);
2158 return NO_EXIT;
2161 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2163 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2164 return NO_EXIT;
2167 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2169 int r1 = get_field(s->fields, r1);
2170 int r2 = get_field(s->fields, r2);
2171 TCGv_i64 t = tcg_temp_new_i64();
2173 /* Note the "subsequently" in the PoO, which implies a defined result
2174 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2175 tcg_gen_shri_i64(t, psw_mask, 32);
2176 store_reg32_i64(r1, t);
2177 if (r2 != 0) {
2178 store_reg32_i64(r2, psw_mask);
2181 tcg_temp_free_i64(t);
2182 return NO_EXIT;
2185 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2187 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2188 tb->flags, (ab)use the tb->cs_base field as the address of
2189 the template in memory, and grab 8 bits of tb->flags/cflags for
2190 the contents of the register. We would then recognize all this
2191 in gen_intermediate_code_internal, generating code for exactly
2192 one instruction. This new TB then gets executed normally.
2194 On the other hand, this seems to be mostly used for modifying
2195 MVC inside of memcpy, which needs a helper call anyway. So
2196 perhaps this doesn't bear thinking about any further. */
2198 TCGv_i64 tmp;
2200 update_psw_addr(s);
2201 gen_op_calc_cc(s);
2203 tmp = tcg_const_i64(s->next_pc);
2204 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2205 tcg_temp_free_i64(tmp);
2207 return NO_EXIT;
2210 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2212 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2213 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2214 tcg_temp_free_i32(m3);
2215 return NO_EXIT;
2218 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2220 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2221 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2222 tcg_temp_free_i32(m3);
2223 return NO_EXIT;
2226 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2228 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2229 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2230 return_low128(o->out2);
2231 tcg_temp_free_i32(m3);
2232 return NO_EXIT;
2235 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2237 /* We'll use the original input for cc computation, since we get to
2238 compare that against 0, which ought to be better than comparing
2239 the real output against 64. It also lets cc_dst be a convenient
2240 temporary during our computation. */
2241 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2243 /* R1 = IN ? CLZ(IN) : 64. */
2244 gen_helper_clz(o->out, o->in2);
2246 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2247 value by 64, which is undefined. But since the shift is 64 iff the
2248 input is zero, we still get the correct result after and'ing. */
2249 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2250 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2251 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2252 return NO_EXIT;
2255 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2257 int m3 = get_field(s->fields, m3);
2258 int pos, len, base = s->insn->data;
2259 TCGv_i64 tmp = tcg_temp_new_i64();
2260 uint64_t ccm;
2262 switch (m3) {
2263 case 0xf:
2264 /* Effectively a 32-bit load. */
2265 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2266 len = 32;
2267 goto one_insert;
2269 case 0xc:
2270 case 0x6:
2271 case 0x3:
2272 /* Effectively a 16-bit load. */
2273 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2274 len = 16;
2275 goto one_insert;
2277 case 0x8:
2278 case 0x4:
2279 case 0x2:
2280 case 0x1:
2281 /* Effectively an 8-bit load. */
2282 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2283 len = 8;
2284 goto one_insert;
2286 one_insert:
2287 pos = base + ctz32(m3) * 8;
2288 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2289 ccm = ((1ull << len) - 1) << pos;
2290 break;
2292 default:
2293 /* This is going to be a sequence of loads and inserts. */
2294 pos = base + 32 - 8;
2295 ccm = 0;
2296 while (m3) {
2297 if (m3 & 0x8) {
2298 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2299 tcg_gen_addi_i64(o->in2, o->in2, 1);
2300 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2301 ccm |= 0xff << pos;
2303 m3 = (m3 << 1) & 0xf;
2304 pos -= 8;
2306 break;
2309 tcg_gen_movi_i64(tmp, ccm);
2310 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2311 tcg_temp_free_i64(tmp);
2312 return NO_EXIT;
2315 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2317 int shift = s->insn->data & 0xff;
2318 int size = s->insn->data >> 8;
2319 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2320 return NO_EXIT;
2323 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2325 TCGv_i64 t1;
2327 gen_op_calc_cc(s);
2328 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2330 t1 = tcg_temp_new_i64();
2331 tcg_gen_shli_i64(t1, psw_mask, 20);
2332 tcg_gen_shri_i64(t1, t1, 36);
2333 tcg_gen_or_i64(o->out, o->out, t1);
2335 tcg_gen_extu_i32_i64(t1, cc_op);
2336 tcg_gen_shli_i64(t1, t1, 28);
2337 tcg_gen_or_i64(o->out, o->out, t1);
2338 tcg_temp_free_i64(t1);
2339 return NO_EXIT;
2342 #ifndef CONFIG_USER_ONLY
2343 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2345 check_privileged(s);
2346 gen_helper_ipte(cpu_env, o->in1, o->in2);
2347 return NO_EXIT;
2350 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2352 check_privileged(s);
2353 gen_helper_iske(o->out, cpu_env, o->in2);
2354 return NO_EXIT;
2356 #endif
2358 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2360 gen_helper_ldeb(o->out, cpu_env, o->in2);
2361 return NO_EXIT;
2364 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2366 gen_helper_ledb(o->out, cpu_env, o->in2);
2367 return NO_EXIT;
2370 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2372 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2373 return NO_EXIT;
2376 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2378 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2379 return NO_EXIT;
2382 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2384 gen_helper_lxdb(o->out, cpu_env, o->in2);
2385 return_low128(o->out2);
2386 return NO_EXIT;
2389 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2391 gen_helper_lxeb(o->out, cpu_env, o->in2);
2392 return_low128(o->out2);
2393 return NO_EXIT;
2396 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2398 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2399 return NO_EXIT;
2402 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2404 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2405 return NO_EXIT;
2408 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2410 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2411 return NO_EXIT;
2414 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2416 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2417 return NO_EXIT;
2420 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2422 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2423 return NO_EXIT;
2426 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2428 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2429 return NO_EXIT;
2432 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2434 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2435 return NO_EXIT;
2438 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2440 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2441 return NO_EXIT;
2444 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2446 TCGLabel *lab = gen_new_label();
2447 store_reg32_i64(get_field(s->fields, r1), o->in2);
2448 /* The value is stored even in case of trap. */
2449 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2450 gen_trap(s);
2451 gen_set_label(lab);
2452 return NO_EXIT;
2455 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2457 TCGLabel *lab = gen_new_label();
2458 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2459 /* The value is stored even in case of trap. */
2460 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2461 gen_trap(s);
2462 gen_set_label(lab);
2463 return NO_EXIT;
2466 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2468 TCGLabel *lab = gen_new_label();
2469 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2470 /* The value is stored even in case of trap. */
2471 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2472 gen_trap(s);
2473 gen_set_label(lab);
2474 return NO_EXIT;
2477 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2479 TCGLabel *lab = gen_new_label();
2480 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2481 /* The value is stored even in case of trap. */
2482 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2483 gen_trap(s);
2484 gen_set_label(lab);
2485 return NO_EXIT;
2488 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2490 TCGLabel *lab = gen_new_label();
2491 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2492 /* The value is stored even in case of trap. */
2493 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2494 gen_trap(s);
2495 gen_set_label(lab);
2496 return NO_EXIT;
2499 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2501 DisasCompare c;
2503 disas_jcc(s, &c, get_field(s->fields, m3));
2505 if (c.is_64) {
2506 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2507 o->in2, o->in1);
2508 free_compare(&c);
2509 } else {
2510 TCGv_i32 t32 = tcg_temp_new_i32();
2511 TCGv_i64 t, z;
2513 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2514 free_compare(&c);
2516 t = tcg_temp_new_i64();
2517 tcg_gen_extu_i32_i64(t, t32);
2518 tcg_temp_free_i32(t32);
2520 z = tcg_const_i64(0);
2521 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2522 tcg_temp_free_i64(t);
2523 tcg_temp_free_i64(z);
2526 return NO_EXIT;
2529 #ifndef CONFIG_USER_ONLY
2530 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2532 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2533 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2534 check_privileged(s);
2535 potential_page_fault(s);
2536 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2537 tcg_temp_free_i32(r1);
2538 tcg_temp_free_i32(r3);
2539 return NO_EXIT;
2542 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2544 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2545 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2546 check_privileged(s);
2547 potential_page_fault(s);
2548 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2549 tcg_temp_free_i32(r1);
2550 tcg_temp_free_i32(r3);
2551 return NO_EXIT;
2553 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2555 check_privileged(s);
2556 potential_page_fault(s);
2557 gen_helper_lra(o->out, cpu_env, o->in2);
2558 set_cc_static(s);
2559 return NO_EXIT;
2562 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2564 TCGv_i64 t1, t2;
2566 check_privileged(s);
2567 per_breaking_event(s);
2569 t1 = tcg_temp_new_i64();
2570 t2 = tcg_temp_new_i64();
2571 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2572 tcg_gen_addi_i64(o->in2, o->in2, 4);
2573 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2574 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2575 tcg_gen_shli_i64(t1, t1, 32);
2576 gen_helper_load_psw(cpu_env, t1, t2);
2577 tcg_temp_free_i64(t1);
2578 tcg_temp_free_i64(t2);
2579 return EXIT_NORETURN;
2582 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2584 TCGv_i64 t1, t2;
2586 check_privileged(s);
2587 per_breaking_event(s);
2589 t1 = tcg_temp_new_i64();
2590 t2 = tcg_temp_new_i64();
2591 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2592 tcg_gen_addi_i64(o->in2, o->in2, 8);
2593 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2594 gen_helper_load_psw(cpu_env, t1, t2);
2595 tcg_temp_free_i64(t1);
2596 tcg_temp_free_i64(t2);
2597 return EXIT_NORETURN;
2599 #endif
2601 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2603 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2604 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2605 potential_page_fault(s);
2606 gen_helper_lam(cpu_env, r1, o->in2, r3);
2607 tcg_temp_free_i32(r1);
2608 tcg_temp_free_i32(r3);
2609 return NO_EXIT;
2612 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2614 int r1 = get_field(s->fields, r1);
2615 int r3 = get_field(s->fields, r3);
2616 TCGv_i64 t1, t2;
2618 /* Only one register to read. */
2619 t1 = tcg_temp_new_i64();
2620 if (unlikely(r1 == r3)) {
2621 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2622 store_reg32_i64(r1, t1);
2623 tcg_temp_free(t1);
2624 return NO_EXIT;
2627 /* First load the values of the first and last registers to trigger
2628 possible page faults. */
2629 t2 = tcg_temp_new_i64();
2630 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2631 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2632 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2633 store_reg32_i64(r1, t1);
2634 store_reg32_i64(r3, t2);
2636 /* Only two registers to read. */
2637 if (((r1 + 1) & 15) == r3) {
2638 tcg_temp_free(t2);
2639 tcg_temp_free(t1);
2640 return NO_EXIT;
2643 /* Then load the remaining registers. Page fault can't occur. */
2644 r3 = (r3 - 1) & 15;
2645 tcg_gen_movi_i64(t2, 4);
2646 while (r1 != r3) {
2647 r1 = (r1 + 1) & 15;
2648 tcg_gen_add_i64(o->in2, o->in2, t2);
2649 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2650 store_reg32_i64(r1, t1);
2652 tcg_temp_free(t2);
2653 tcg_temp_free(t1);
2655 return NO_EXIT;
2658 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2660 int r1 = get_field(s->fields, r1);
2661 int r3 = get_field(s->fields, r3);
2662 TCGv_i64 t1, t2;
2664 /* Only one register to read. */
2665 t1 = tcg_temp_new_i64();
2666 if (unlikely(r1 == r3)) {
2667 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2668 store_reg32h_i64(r1, t1);
2669 tcg_temp_free(t1);
2670 return NO_EXIT;
2673 /* First load the values of the first and last registers to trigger
2674 possible page faults. */
2675 t2 = tcg_temp_new_i64();
2676 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2677 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2678 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2679 store_reg32h_i64(r1, t1);
2680 store_reg32h_i64(r3, t2);
2682 /* Only two registers to read. */
2683 if (((r1 + 1) & 15) == r3) {
2684 tcg_temp_free(t2);
2685 tcg_temp_free(t1);
2686 return NO_EXIT;
2689 /* Then load the remaining registers. Page fault can't occur. */
2690 r3 = (r3 - 1) & 15;
2691 tcg_gen_movi_i64(t2, 4);
2692 while (r1 != r3) {
2693 r1 = (r1 + 1) & 15;
2694 tcg_gen_add_i64(o->in2, o->in2, t2);
2695 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2696 store_reg32h_i64(r1, t1);
2698 tcg_temp_free(t2);
2699 tcg_temp_free(t1);
2701 return NO_EXIT;
2704 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2706 int r1 = get_field(s->fields, r1);
2707 int r3 = get_field(s->fields, r3);
2708 TCGv_i64 t1, t2;
2710 /* Only one register to read. */
2711 if (unlikely(r1 == r3)) {
2712 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2713 return NO_EXIT;
2716 /* First load the values of the first and last registers to trigger
2717 possible page faults. */
2718 t1 = tcg_temp_new_i64();
2719 t2 = tcg_temp_new_i64();
2720 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2721 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2722 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2723 tcg_gen_mov_i64(regs[r1], t1);
2724 tcg_temp_free(t2);
2726 /* Only two registers to read. */
2727 if (((r1 + 1) & 15) == r3) {
2728 tcg_temp_free(t1);
2729 return NO_EXIT;
2732 /* Then load the remaining registers. Page fault can't occur. */
2733 r3 = (r3 - 1) & 15;
2734 tcg_gen_movi_i64(t1, 8);
2735 while (r1 != r3) {
2736 r1 = (r1 + 1) & 15;
2737 tcg_gen_add_i64(o->in2, o->in2, t1);
2738 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2740 tcg_temp_free(t1);
2742 return NO_EXIT;
2745 #ifndef CONFIG_USER_ONLY
2746 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2748 check_privileged(s);
2749 potential_page_fault(s);
2750 gen_helper_lura(o->out, cpu_env, o->in2);
2751 return NO_EXIT;
2754 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2756 check_privileged(s);
2757 potential_page_fault(s);
2758 gen_helper_lurag(o->out, cpu_env, o->in2);
2759 return NO_EXIT;
2761 #endif
2763 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2765 o->out = o->in2;
2766 o->g_out = o->g_in2;
2767 TCGV_UNUSED_I64(o->in2);
2768 o->g_in2 = false;
2769 return NO_EXIT;
2772 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2774 int b2 = get_field(s->fields, b2);
2775 TCGv ar1 = tcg_temp_new_i64();
2777 o->out = o->in2;
2778 o->g_out = o->g_in2;
2779 TCGV_UNUSED_I64(o->in2);
2780 o->g_in2 = false;
2782 switch (s->tb->flags & FLAG_MASK_ASC) {
2783 case PSW_ASC_PRIMARY >> 32:
2784 tcg_gen_movi_i64(ar1, 0);
2785 break;
2786 case PSW_ASC_ACCREG >> 32:
2787 tcg_gen_movi_i64(ar1, 1);
2788 break;
2789 case PSW_ASC_SECONDARY >> 32:
2790 if (b2) {
2791 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2792 } else {
2793 tcg_gen_movi_i64(ar1, 0);
2795 break;
2796 case PSW_ASC_HOME >> 32:
2797 tcg_gen_movi_i64(ar1, 2);
2798 break;
2801 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2802 tcg_temp_free_i64(ar1);
2804 return NO_EXIT;
2807 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2809 o->out = o->in1;
2810 o->out2 = o->in2;
2811 o->g_out = o->g_in1;
2812 o->g_out2 = o->g_in2;
2813 TCGV_UNUSED_I64(o->in1);
2814 TCGV_UNUSED_I64(o->in2);
2815 o->g_in1 = o->g_in2 = false;
2816 return NO_EXIT;
2819 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2821 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2822 potential_page_fault(s);
2823 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2824 tcg_temp_free_i32(l);
2825 return NO_EXIT;
2828 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2830 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2831 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2832 potential_page_fault(s);
2833 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2834 tcg_temp_free_i32(r1);
2835 tcg_temp_free_i32(r2);
2836 set_cc_static(s);
2837 return NO_EXIT;
2840 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2842 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2843 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2844 potential_page_fault(s);
2845 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2846 tcg_temp_free_i32(r1);
2847 tcg_temp_free_i32(r3);
2848 set_cc_static(s);
2849 return NO_EXIT;
2852 #ifndef CONFIG_USER_ONLY
2853 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2855 int r1 = get_field(s->fields, l1);
2856 check_privileged(s);
2857 potential_page_fault(s);
2858 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2859 set_cc_static(s);
2860 return NO_EXIT;
2863 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2865 int r1 = get_field(s->fields, l1);
2866 check_privileged(s);
2867 potential_page_fault(s);
2868 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2869 set_cc_static(s);
2870 return NO_EXIT;
2872 #endif
2874 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2876 potential_page_fault(s);
2877 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2878 set_cc_static(s);
2879 return NO_EXIT;
2882 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2884 potential_page_fault(s);
2885 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2886 set_cc_static(s);
2887 return_low128(o->in2);
2888 return NO_EXIT;
2891 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2893 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2894 return NO_EXIT;
2897 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2899 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2900 return NO_EXIT;
2903 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2905 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2906 return NO_EXIT;
2909 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2911 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2912 return NO_EXIT;
2915 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2917 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2918 return NO_EXIT;
2921 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2923 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2924 return_low128(o->out2);
2925 return NO_EXIT;
2928 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2930 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2931 return_low128(o->out2);
2932 return NO_EXIT;
2935 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2937 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2938 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2939 tcg_temp_free_i64(r3);
2940 return NO_EXIT;
2943 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2945 int r3 = get_field(s->fields, r3);
2946 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2947 return NO_EXIT;
2950 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2952 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2953 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2954 tcg_temp_free_i64(r3);
2955 return NO_EXIT;
2958 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2960 int r3 = get_field(s->fields, r3);
2961 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2962 return NO_EXIT;
2965 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2967 TCGv_i64 z, n;
2968 z = tcg_const_i64(0);
2969 n = tcg_temp_new_i64();
2970 tcg_gen_neg_i64(n, o->in2);
2971 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2972 tcg_temp_free_i64(n);
2973 tcg_temp_free_i64(z);
2974 return NO_EXIT;
2977 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2979 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2980 return NO_EXIT;
2983 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2985 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2986 return NO_EXIT;
2989 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2991 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2992 tcg_gen_mov_i64(o->out2, o->in2);
2993 return NO_EXIT;
2996 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2998 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2999 potential_page_fault(s);
3000 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3001 tcg_temp_free_i32(l);
3002 set_cc_static(s);
3003 return NO_EXIT;
3006 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3008 tcg_gen_neg_i64(o->out, o->in2);
3009 return NO_EXIT;
3012 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3014 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3015 return NO_EXIT;
3018 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3020 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3021 return NO_EXIT;
3024 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3026 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3027 tcg_gen_mov_i64(o->out2, o->in2);
3028 return NO_EXIT;
3031 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3033 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3034 potential_page_fault(s);
3035 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3036 tcg_temp_free_i32(l);
3037 set_cc_static(s);
3038 return NO_EXIT;
3041 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3043 tcg_gen_or_i64(o->out, o->in1, o->in2);
3044 return NO_EXIT;
3047 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3049 int shift = s->insn->data & 0xff;
3050 int size = s->insn->data >> 8;
3051 uint64_t mask = ((1ull << size) - 1) << shift;
3053 assert(!o->g_in2);
3054 tcg_gen_shli_i64(o->in2, o->in2, shift);
3055 tcg_gen_or_i64(o->out, o->in1, o->in2);
3057 /* Produce the CC from only the bits manipulated. */
3058 tcg_gen_andi_i64(cc_dst, o->out, mask);
3059 set_cc_nz_u64(s, cc_dst);
3060 return NO_EXIT;
3063 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3065 gen_helper_popcnt(o->out, o->in2);
3066 return NO_EXIT;
3069 #ifndef CONFIG_USER_ONLY
3070 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3072 check_privileged(s);
3073 gen_helper_ptlb(cpu_env);
3074 return NO_EXIT;
3076 #endif
3078 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3080 int i3 = get_field(s->fields, i3);
3081 int i4 = get_field(s->fields, i4);
3082 int i5 = get_field(s->fields, i5);
3083 int do_zero = i4 & 0x80;
3084 uint64_t mask, imask, pmask;
3085 int pos, len, rot;
3087 /* Adjust the arguments for the specific insn. */
3088 switch (s->fields->op2) {
3089 case 0x55: /* risbg */
3090 i3 &= 63;
3091 i4 &= 63;
3092 pmask = ~0;
3093 break;
3094 case 0x5d: /* risbhg */
3095 i3 &= 31;
3096 i4 &= 31;
3097 pmask = 0xffffffff00000000ull;
3098 break;
3099 case 0x51: /* risblg */
3100 i3 &= 31;
3101 i4 &= 31;
3102 pmask = 0x00000000ffffffffull;
3103 break;
3104 default:
3105 abort();
3108 /* MASK is the set of bits to be inserted from R2.
3109 Take care for I3/I4 wraparound. */
3110 mask = pmask >> i3;
3111 if (i3 <= i4) {
3112 mask ^= pmask >> i4 >> 1;
3113 } else {
3114 mask |= ~(pmask >> i4 >> 1);
3116 mask &= pmask;
3118 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3119 insns, we need to keep the other half of the register. */
3120 imask = ~mask | ~pmask;
3121 if (do_zero) {
3122 if (s->fields->op2 == 0x55) {
3123 imask = 0;
3124 } else {
3125 imask = ~pmask;
3129 /* In some cases we can implement this with deposit, which can be more
3130 efficient on some hosts. */
3131 if (~mask == imask && i3 <= i4) {
3132 if (s->fields->op2 == 0x5d) {
3133 i3 += 32, i4 += 32;
3135 /* Note that we rotate the bits to be inserted to the lsb, not to
3136 the position as described in the PoO. */
3137 len = i4 - i3 + 1;
3138 pos = 63 - i4;
3139 rot = (i5 - pos) & 63;
3140 } else {
3141 pos = len = -1;
3142 rot = i5 & 63;
3145 /* Rotate the input as necessary. */
3146 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3148 /* Insert the selected bits into the output. */
3149 if (pos >= 0) {
3150 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3151 } else if (imask == 0) {
3152 tcg_gen_andi_i64(o->out, o->in2, mask);
3153 } else {
3154 tcg_gen_andi_i64(o->in2, o->in2, mask);
3155 tcg_gen_andi_i64(o->out, o->out, imask);
3156 tcg_gen_or_i64(o->out, o->out, o->in2);
3158 return NO_EXIT;
3161 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3163 int i3 = get_field(s->fields, i3);
3164 int i4 = get_field(s->fields, i4);
3165 int i5 = get_field(s->fields, i5);
3166 uint64_t mask;
3168 /* If this is a test-only form, arrange to discard the result. */
3169 if (i3 & 0x80) {
3170 o->out = tcg_temp_new_i64();
3171 o->g_out = false;
3174 i3 &= 63;
3175 i4 &= 63;
3176 i5 &= 63;
3178 /* MASK is the set of bits to be operated on from R2.
3179 Take care for I3/I4 wraparound. */
3180 mask = ~0ull >> i3;
3181 if (i3 <= i4) {
3182 mask ^= ~0ull >> i4 >> 1;
3183 } else {
3184 mask |= ~(~0ull >> i4 >> 1);
3187 /* Rotate the input as necessary. */
3188 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3190 /* Operate. */
3191 switch (s->fields->op2) {
3192 case 0x55: /* AND */
3193 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3194 tcg_gen_and_i64(o->out, o->out, o->in2);
3195 break;
3196 case 0x56: /* OR */
3197 tcg_gen_andi_i64(o->in2, o->in2, mask);
3198 tcg_gen_or_i64(o->out, o->out, o->in2);
3199 break;
3200 case 0x57: /* XOR */
3201 tcg_gen_andi_i64(o->in2, o->in2, mask);
3202 tcg_gen_xor_i64(o->out, o->out, o->in2);
3203 break;
3204 default:
3205 abort();
3208 /* Set the CC. */
3209 tcg_gen_andi_i64(cc_dst, o->out, mask);
3210 set_cc_nz_u64(s, cc_dst);
3211 return NO_EXIT;
3214 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3216 tcg_gen_bswap16_i64(o->out, o->in2);
3217 return NO_EXIT;
3220 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3222 tcg_gen_bswap32_i64(o->out, o->in2);
3223 return NO_EXIT;
3226 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3228 tcg_gen_bswap64_i64(o->out, o->in2);
3229 return NO_EXIT;
3232 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3234 TCGv_i32 t1 = tcg_temp_new_i32();
3235 TCGv_i32 t2 = tcg_temp_new_i32();
3236 TCGv_i32 to = tcg_temp_new_i32();
3237 tcg_gen_extrl_i64_i32(t1, o->in1);
3238 tcg_gen_extrl_i64_i32(t2, o->in2);
3239 tcg_gen_rotl_i32(to, t1, t2);
3240 tcg_gen_extu_i32_i64(o->out, to);
3241 tcg_temp_free_i32(t1);
3242 tcg_temp_free_i32(t2);
3243 tcg_temp_free_i32(to);
3244 return NO_EXIT;
3247 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3249 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3250 return NO_EXIT;
3253 #ifndef CONFIG_USER_ONLY
3254 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3256 check_privileged(s);
3257 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3258 set_cc_static(s);
3259 return NO_EXIT;
3262 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3264 check_privileged(s);
3265 gen_helper_sacf(cpu_env, o->in2);
3266 /* Addressing mode has changed, so end the block. */
3267 return EXIT_PC_STALE;
3269 #endif
3271 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3273 int sam = s->insn->data;
3274 TCGv_i64 tsam;
3275 uint64_t mask;
3277 switch (sam) {
3278 case 0:
3279 mask = 0xffffff;
3280 break;
3281 case 1:
3282 mask = 0x7fffffff;
3283 break;
3284 default:
3285 mask = -1;
3286 break;
3289 /* Bizarre but true, we check the address of the current insn for the
3290 specification exception, not the next to be executed. Thus the PoO
3291 documents that Bad Things Happen two bytes before the end. */
3292 if (s->pc & ~mask) {
3293 gen_program_exception(s, PGM_SPECIFICATION);
3294 return EXIT_NORETURN;
3296 s->next_pc &= mask;
3298 tsam = tcg_const_i64(sam);
3299 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3300 tcg_temp_free_i64(tsam);
3302 /* Always exit the TB, since we (may have) changed execution mode. */
3303 return EXIT_PC_STALE;
3306 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3308 int r1 = get_field(s->fields, r1);
3309 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3310 return NO_EXIT;
3313 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3315 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3316 return NO_EXIT;
3319 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3321 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3322 return NO_EXIT;
3325 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3327 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3328 return_low128(o->out2);
3329 return NO_EXIT;
3332 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3334 gen_helper_sqeb(o->out, cpu_env, o->in2);
3335 return NO_EXIT;
3338 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3340 gen_helper_sqdb(o->out, cpu_env, o->in2);
3341 return NO_EXIT;
3344 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3346 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3347 return_low128(o->out2);
3348 return NO_EXIT;
3351 #ifndef CONFIG_USER_ONLY
3352 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3354 check_privileged(s);
3355 potential_page_fault(s);
3356 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3357 set_cc_static(s);
3358 return NO_EXIT;
3361 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3363 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3364 check_privileged(s);
3365 potential_page_fault(s);
3366 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3367 tcg_temp_free_i32(r1);
3368 return NO_EXIT;
3370 #endif
3372 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3374 DisasCompare c;
3375 TCGv_i64 a;
3376 TCGLabel *lab;
3377 int r1;
3379 disas_jcc(s, &c, get_field(s->fields, m3));
3381 /* We want to store when the condition is fulfilled, so branch
3382 out when it's not */
3383 c.cond = tcg_invert_cond(c.cond);
3385 lab = gen_new_label();
3386 if (c.is_64) {
3387 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3388 } else {
3389 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3391 free_compare(&c);
3393 r1 = get_field(s->fields, r1);
3394 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3395 if (s->insn->data) {
3396 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3397 } else {
3398 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3400 tcg_temp_free_i64(a);
3402 gen_set_label(lab);
3403 return NO_EXIT;
3406 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3408 uint64_t sign = 1ull << s->insn->data;
3409 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3410 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3411 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3412 /* The arithmetic left shift is curious in that it does not affect
3413 the sign bit. Copy that over from the source unchanged. */
3414 tcg_gen_andi_i64(o->out, o->out, ~sign);
3415 tcg_gen_andi_i64(o->in1, o->in1, sign);
3416 tcg_gen_or_i64(o->out, o->out, o->in1);
3417 return NO_EXIT;
3420 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3422 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3423 return NO_EXIT;
3426 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3428 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3429 return NO_EXIT;
3432 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3434 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3435 return NO_EXIT;
3438 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3440 gen_helper_sfpc(cpu_env, o->in2);
3441 return NO_EXIT;
3444 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3446 gen_helper_sfas(cpu_env, o->in2);
3447 return NO_EXIT;
3450 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3452 int b2 = get_field(s->fields, b2);
3453 int d2 = get_field(s->fields, d2);
3454 TCGv_i64 t1 = tcg_temp_new_i64();
3455 TCGv_i64 t2 = tcg_temp_new_i64();
3456 int mask, pos, len;
3458 switch (s->fields->op2) {
3459 case 0x99: /* SRNM */
3460 pos = 0, len = 2;
3461 break;
3462 case 0xb8: /* SRNMB */
3463 pos = 0, len = 3;
3464 break;
3465 case 0xb9: /* SRNMT */
3466 pos = 4, len = 3;
3467 break;
3468 default:
3469 tcg_abort();
3471 mask = (1 << len) - 1;
3473 /* Insert the value into the appropriate field of the FPC. */
3474 if (b2 == 0) {
3475 tcg_gen_movi_i64(t1, d2 & mask);
3476 } else {
3477 tcg_gen_addi_i64(t1, regs[b2], d2);
3478 tcg_gen_andi_i64(t1, t1, mask);
3480 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3481 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3482 tcg_temp_free_i64(t1);
3484 /* Then install the new FPC to set the rounding mode in fpu_status. */
3485 gen_helper_sfpc(cpu_env, t2);
3486 tcg_temp_free_i64(t2);
3487 return NO_EXIT;
3490 #ifndef CONFIG_USER_ONLY
3491 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3493 check_privileged(s);
3494 tcg_gen_shri_i64(o->in2, o->in2, 4);
3495 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3496 return NO_EXIT;
3499 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3501 check_privileged(s);
3502 gen_helper_sske(cpu_env, o->in1, o->in2);
3503 return NO_EXIT;
3506 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3508 check_privileged(s);
3509 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3510 return NO_EXIT;
3513 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3515 check_privileged(s);
3516 /* ??? Surely cpu address != cpu number. In any case the previous
3517 version of this stored more than the required half-word, so it
3518 is unlikely this has ever been tested. */
3519 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3520 return NO_EXIT;
3523 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3525 gen_helper_stck(o->out, cpu_env);
3526 /* ??? We don't implement clock states. */
3527 gen_op_movi_cc(s, 0);
3528 return NO_EXIT;
3531 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3533 TCGv_i64 c1 = tcg_temp_new_i64();
3534 TCGv_i64 c2 = tcg_temp_new_i64();
3535 gen_helper_stck(c1, cpu_env);
3536 /* Shift the 64-bit value into its place as a zero-extended
3537 104-bit value. Note that "bit positions 64-103 are always
3538 non-zero so that they compare differently to STCK"; we set
3539 the least significant bit to 1. */
3540 tcg_gen_shli_i64(c2, c1, 56);
3541 tcg_gen_shri_i64(c1, c1, 8);
3542 tcg_gen_ori_i64(c2, c2, 0x10000);
3543 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3544 tcg_gen_addi_i64(o->in2, o->in2, 8);
3545 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3546 tcg_temp_free_i64(c1);
3547 tcg_temp_free_i64(c2);
3548 /* ??? We don't implement clock states. */
3549 gen_op_movi_cc(s, 0);
3550 return NO_EXIT;
3553 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3555 check_privileged(s);
3556 gen_helper_sckc(cpu_env, o->in2);
3557 return NO_EXIT;
3560 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3562 check_privileged(s);
3563 gen_helper_stckc(o->out, cpu_env);
3564 return NO_EXIT;
3567 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3569 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3570 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3571 check_privileged(s);
3572 potential_page_fault(s);
3573 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3574 tcg_temp_free_i32(r1);
3575 tcg_temp_free_i32(r3);
3576 return NO_EXIT;
3579 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3581 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3582 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3583 check_privileged(s);
3584 potential_page_fault(s);
3585 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3586 tcg_temp_free_i32(r1);
3587 tcg_temp_free_i32(r3);
3588 return NO_EXIT;
3591 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3593 TCGv_i64 t1 = tcg_temp_new_i64();
3595 check_privileged(s);
3596 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3597 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3598 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3599 tcg_temp_free_i64(t1);
3601 return NO_EXIT;
3604 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3606 check_privileged(s);
3607 gen_helper_spt(cpu_env, o->in2);
3608 return NO_EXIT;
3611 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3613 TCGv_i64 f, a;
3614 /* We really ought to have more complete indication of facilities
3615 that we implement. Address this when STFLE is implemented. */
3616 check_privileged(s);
3617 f = tcg_const_i64(0xc0000000);
3618 a = tcg_const_i64(200);
3619 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3620 tcg_temp_free_i64(f);
3621 tcg_temp_free_i64(a);
3622 return NO_EXIT;
3625 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3627 check_privileged(s);
3628 gen_helper_stpt(o->out, cpu_env);
3629 return NO_EXIT;
3632 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3634 check_privileged(s);
3635 potential_page_fault(s);
3636 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3637 set_cc_static(s);
3638 return NO_EXIT;
3641 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3643 check_privileged(s);
3644 gen_helper_spx(cpu_env, o->in2);
3645 return NO_EXIT;
3648 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3650 check_privileged(s);
3651 potential_page_fault(s);
3652 gen_helper_xsch(cpu_env, regs[1]);
3653 set_cc_static(s);
3654 return NO_EXIT;
3657 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3659 check_privileged(s);
3660 potential_page_fault(s);
3661 gen_helper_csch(cpu_env, regs[1]);
3662 set_cc_static(s);
3663 return NO_EXIT;
3666 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3668 check_privileged(s);
3669 potential_page_fault(s);
3670 gen_helper_hsch(cpu_env, regs[1]);
3671 set_cc_static(s);
3672 return NO_EXIT;
3675 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3677 check_privileged(s);
3678 potential_page_fault(s);
3679 gen_helper_msch(cpu_env, regs[1], o->in2);
3680 set_cc_static(s);
3681 return NO_EXIT;
3684 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3686 check_privileged(s);
3687 potential_page_fault(s);
3688 gen_helper_rchp(cpu_env, regs[1]);
3689 set_cc_static(s);
3690 return NO_EXIT;
3693 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3695 check_privileged(s);
3696 potential_page_fault(s);
3697 gen_helper_rsch(cpu_env, regs[1]);
3698 set_cc_static(s);
3699 return NO_EXIT;
3702 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3704 check_privileged(s);
3705 potential_page_fault(s);
3706 gen_helper_ssch(cpu_env, regs[1], o->in2);
3707 set_cc_static(s);
3708 return NO_EXIT;
3711 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3713 check_privileged(s);
3714 potential_page_fault(s);
3715 gen_helper_stsch(cpu_env, regs[1], o->in2);
3716 set_cc_static(s);
3717 return NO_EXIT;
3720 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3722 check_privileged(s);
3723 potential_page_fault(s);
3724 gen_helper_tsch(cpu_env, regs[1], o->in2);
3725 set_cc_static(s);
3726 return NO_EXIT;
3729 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3731 check_privileged(s);
3732 potential_page_fault(s);
3733 gen_helper_chsc(cpu_env, o->in2);
3734 set_cc_static(s);
3735 return NO_EXIT;
3738 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3740 check_privileged(s);
3741 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3742 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3743 return NO_EXIT;
3746 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3748 uint64_t i2 = get_field(s->fields, i2);
3749 TCGv_i64 t;
3751 check_privileged(s);
3753 /* It is important to do what the instruction name says: STORE THEN.
3754 If we let the output hook perform the store then if we fault and
3755 restart, we'll have the wrong SYSTEM MASK in place. */
3756 t = tcg_temp_new_i64();
3757 tcg_gen_shri_i64(t, psw_mask, 56);
3758 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3759 tcg_temp_free_i64(t);
3761 if (s->fields->op == 0xac) {
3762 tcg_gen_andi_i64(psw_mask, psw_mask,
3763 (i2 << 56) | 0x00ffffffffffffffull);
3764 } else {
3765 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3767 return NO_EXIT;
3770 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3772 check_privileged(s);
3773 potential_page_fault(s);
3774 gen_helper_stura(cpu_env, o->in2, o->in1);
3775 return NO_EXIT;
3778 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3780 check_privileged(s);
3781 potential_page_fault(s);
3782 gen_helper_sturg(cpu_env, o->in2, o->in1);
3783 return NO_EXIT;
3785 #endif
3787 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3789 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3790 return NO_EXIT;
3793 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3795 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3796 return NO_EXIT;
3799 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3801 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3802 return NO_EXIT;
3805 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3807 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3808 return NO_EXIT;
3811 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3813 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3814 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3815 potential_page_fault(s);
3816 gen_helper_stam(cpu_env, r1, o->in2, r3);
3817 tcg_temp_free_i32(r1);
3818 tcg_temp_free_i32(r3);
3819 return NO_EXIT;
3822 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3824 int m3 = get_field(s->fields, m3);
3825 int pos, base = s->insn->data;
3826 TCGv_i64 tmp = tcg_temp_new_i64();
3828 pos = base + ctz32(m3) * 8;
3829 switch (m3) {
3830 case 0xf:
3831 /* Effectively a 32-bit store. */
3832 tcg_gen_shri_i64(tmp, o->in1, pos);
3833 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3834 break;
3836 case 0xc:
3837 case 0x6:
3838 case 0x3:
3839 /* Effectively a 16-bit store. */
3840 tcg_gen_shri_i64(tmp, o->in1, pos);
3841 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3842 break;
3844 case 0x8:
3845 case 0x4:
3846 case 0x2:
3847 case 0x1:
3848 /* Effectively an 8-bit store. */
3849 tcg_gen_shri_i64(tmp, o->in1, pos);
3850 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3851 break;
3853 default:
3854 /* This is going to be a sequence of shifts and stores. */
3855 pos = base + 32 - 8;
3856 while (m3) {
3857 if (m3 & 0x8) {
3858 tcg_gen_shri_i64(tmp, o->in1, pos);
3859 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3860 tcg_gen_addi_i64(o->in2, o->in2, 1);
3862 m3 = (m3 << 1) & 0xf;
3863 pos -= 8;
3865 break;
3867 tcg_temp_free_i64(tmp);
3868 return NO_EXIT;
3871 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3873 int r1 = get_field(s->fields, r1);
3874 int r3 = get_field(s->fields, r3);
3875 int size = s->insn->data;
3876 TCGv_i64 tsize = tcg_const_i64(size);
3878 while (1) {
3879 if (size == 8) {
3880 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3881 } else {
3882 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3884 if (r1 == r3) {
3885 break;
3887 tcg_gen_add_i64(o->in2, o->in2, tsize);
3888 r1 = (r1 + 1) & 15;
3891 tcg_temp_free_i64(tsize);
3892 return NO_EXIT;
3895 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3897 int r1 = get_field(s->fields, r1);
3898 int r3 = get_field(s->fields, r3);
3899 TCGv_i64 t = tcg_temp_new_i64();
3900 TCGv_i64 t4 = tcg_const_i64(4);
3901 TCGv_i64 t32 = tcg_const_i64(32);
3903 while (1) {
3904 tcg_gen_shl_i64(t, regs[r1], t32);
3905 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3906 if (r1 == r3) {
3907 break;
3909 tcg_gen_add_i64(o->in2, o->in2, t4);
3910 r1 = (r1 + 1) & 15;
3913 tcg_temp_free_i64(t);
3914 tcg_temp_free_i64(t4);
3915 tcg_temp_free_i64(t32);
3916 return NO_EXIT;
3919 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3921 potential_page_fault(s);
3922 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3923 set_cc_static(s);
3924 return_low128(o->in2);
3925 return NO_EXIT;
3928 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3930 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3931 return NO_EXIT;
3934 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3936 DisasCompare cmp;
3937 TCGv_i64 borrow;
3939 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3941 /* The !borrow flag is the msb of CC. Since we want the inverse of
3942 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3943 disas_jcc(s, &cmp, 8 | 4);
3944 borrow = tcg_temp_new_i64();
3945 if (cmp.is_64) {
3946 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3947 } else {
3948 TCGv_i32 t = tcg_temp_new_i32();
3949 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3950 tcg_gen_extu_i32_i64(borrow, t);
3951 tcg_temp_free_i32(t);
3953 free_compare(&cmp);
3955 tcg_gen_sub_i64(o->out, o->out, borrow);
3956 tcg_temp_free_i64(borrow);
3957 return NO_EXIT;
3960 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3962 TCGv_i32 t;
3964 update_psw_addr(s);
3965 update_cc_op(s);
3967 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3968 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3969 tcg_temp_free_i32(t);
3971 t = tcg_const_i32(s->next_pc - s->pc);
3972 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3973 tcg_temp_free_i32(t);
3975 gen_exception(EXCP_SVC);
3976 return EXIT_NORETURN;
3979 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3981 gen_helper_tceb(cc_op, o->in1, o->in2);
3982 set_cc_static(s);
3983 return NO_EXIT;
3986 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3988 gen_helper_tcdb(cc_op, o->in1, o->in2);
3989 set_cc_static(s);
3990 return NO_EXIT;
3993 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3995 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3996 set_cc_static(s);
3997 return NO_EXIT;
4000 #ifndef CONFIG_USER_ONLY
4001 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4003 potential_page_fault(s);
4004 gen_helper_tprot(cc_op, o->addr1, o->in2);
4005 set_cc_static(s);
4006 return NO_EXIT;
4008 #endif
4010 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4012 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4013 potential_page_fault(s);
4014 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4015 tcg_temp_free_i32(l);
4016 set_cc_static(s);
4017 return NO_EXIT;
4020 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4022 potential_page_fault(s);
4023 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4024 return_low128(o->out2);
4025 set_cc_static(s);
4026 return NO_EXIT;
4029 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4031 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4032 potential_page_fault(s);
4033 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4034 tcg_temp_free_i32(l);
4035 set_cc_static(s);
4036 return NO_EXIT;
4039 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4041 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4042 potential_page_fault(s);
4043 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4044 tcg_temp_free_i32(l);
4045 return NO_EXIT;
4048 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4050 int d1 = get_field(s->fields, d1);
4051 int d2 = get_field(s->fields, d2);
4052 int b1 = get_field(s->fields, b1);
4053 int b2 = get_field(s->fields, b2);
4054 int l = get_field(s->fields, l1);
4055 TCGv_i32 t32;
4057 o->addr1 = get_address(s, 0, b1, d1);
4059 /* If the addresses are identical, this is a store/memset of zero. */
4060 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4061 o->in2 = tcg_const_i64(0);
4063 l++;
4064 while (l >= 8) {
4065 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4066 l -= 8;
4067 if (l > 0) {
4068 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4071 if (l >= 4) {
4072 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4073 l -= 4;
4074 if (l > 0) {
4075 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4078 if (l >= 2) {
4079 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4080 l -= 2;
4081 if (l > 0) {
4082 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4085 if (l) {
4086 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4088 gen_op_movi_cc(s, 0);
4089 return NO_EXIT;
4092 /* But in general we'll defer to a helper. */
4093 o->in2 = get_address(s, 0, b2, d2);
4094 t32 = tcg_const_i32(l);
4095 potential_page_fault(s);
4096 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4097 tcg_temp_free_i32(t32);
4098 set_cc_static(s);
4099 return NO_EXIT;
4102 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4104 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4105 return NO_EXIT;
4108 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4110 int shift = s->insn->data & 0xff;
4111 int size = s->insn->data >> 8;
4112 uint64_t mask = ((1ull << size) - 1) << shift;
4114 assert(!o->g_in2);
4115 tcg_gen_shli_i64(o->in2, o->in2, shift);
4116 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4118 /* Produce the CC from only the bits manipulated. */
4119 tcg_gen_andi_i64(cc_dst, o->out, mask);
4120 set_cc_nz_u64(s, cc_dst);
4121 return NO_EXIT;
4124 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4126 o->out = tcg_const_i64(0);
4127 return NO_EXIT;
4130 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4132 o->out = tcg_const_i64(0);
4133 o->out2 = o->out;
4134 o->g_out2 = true;
4135 return NO_EXIT;
4138 /* ====================================================================== */
4139 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4140 the original inputs), update the various cc data structures in order to
4141 be able to compute the new condition code. */
4143 static void cout_abs32(DisasContext *s, DisasOps *o)
4145 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4148 static void cout_abs64(DisasContext *s, DisasOps *o)
4150 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4153 static void cout_adds32(DisasContext *s, DisasOps *o)
4155 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4158 static void cout_adds64(DisasContext *s, DisasOps *o)
4160 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4163 static void cout_addu32(DisasContext *s, DisasOps *o)
4165 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4168 static void cout_addu64(DisasContext *s, DisasOps *o)
4170 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4173 static void cout_addc32(DisasContext *s, DisasOps *o)
4175 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4178 static void cout_addc64(DisasContext *s, DisasOps *o)
4180 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4183 static void cout_cmps32(DisasContext *s, DisasOps *o)
4185 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4188 static void cout_cmps64(DisasContext *s, DisasOps *o)
4190 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4193 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4195 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4198 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4200 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4203 static void cout_f32(DisasContext *s, DisasOps *o)
4205 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4208 static void cout_f64(DisasContext *s, DisasOps *o)
4210 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4213 static void cout_f128(DisasContext *s, DisasOps *o)
4215 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4218 static void cout_nabs32(DisasContext *s, DisasOps *o)
4220 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4223 static void cout_nabs64(DisasContext *s, DisasOps *o)
4225 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4228 static void cout_neg32(DisasContext *s, DisasOps *o)
4230 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4233 static void cout_neg64(DisasContext *s, DisasOps *o)
4235 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4238 static void cout_nz32(DisasContext *s, DisasOps *o)
4240 tcg_gen_ext32u_i64(cc_dst, o->out);
4241 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4244 static void cout_nz64(DisasContext *s, DisasOps *o)
4246 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4249 static void cout_s32(DisasContext *s, DisasOps *o)
4251 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4254 static void cout_s64(DisasContext *s, DisasOps *o)
4256 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4259 static void cout_subs32(DisasContext *s, DisasOps *o)
4261 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4264 static void cout_subs64(DisasContext *s, DisasOps *o)
4266 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4269 static void cout_subu32(DisasContext *s, DisasOps *o)
4271 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4274 static void cout_subu64(DisasContext *s, DisasOps *o)
4276 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4279 static void cout_subb32(DisasContext *s, DisasOps *o)
4281 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4284 static void cout_subb64(DisasContext *s, DisasOps *o)
4286 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4289 static void cout_tm32(DisasContext *s, DisasOps *o)
4291 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4294 static void cout_tm64(DisasContext *s, DisasOps *o)
4296 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4299 /* ====================================================================== */
4300 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4301 with the TCG register to which we will write. Used in combination with
4302 the "wout" generators, in some cases we need a new temporary, and in
4303 some cases we can write to a TCG global. */
4305 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4307 o->out = tcg_temp_new_i64();
4309 #define SPEC_prep_new 0
4311 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4313 o->out = tcg_temp_new_i64();
4314 o->out2 = tcg_temp_new_i64();
4316 #define SPEC_prep_new_P 0
4318 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4320 o->out = regs[get_field(f, r1)];
4321 o->g_out = true;
4323 #define SPEC_prep_r1 0
4325 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4327 int r1 = get_field(f, r1);
4328 o->out = regs[r1];
4329 o->out2 = regs[r1 + 1];
4330 o->g_out = o->g_out2 = true;
4332 #define SPEC_prep_r1_P SPEC_r1_even
4334 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4336 o->out = fregs[get_field(f, r1)];
4337 o->g_out = true;
4339 #define SPEC_prep_f1 0
4341 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4343 int r1 = get_field(f, r1);
4344 o->out = fregs[r1];
4345 o->out2 = fregs[r1 + 2];
4346 o->g_out = o->g_out2 = true;
4348 #define SPEC_prep_x1 SPEC_r1_f128
4350 /* ====================================================================== */
4351 /* The "Write OUTput" generators. These generally perform some non-trivial
4352 copy of data to TCG globals, or to main memory. The trivial cases are
4353 generally handled by having a "prep" generator install the TCG global
4354 as the destination of the operation. */
4356 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4358 store_reg(get_field(f, r1), o->out);
4360 #define SPEC_wout_r1 0
4362 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4364 int r1 = get_field(f, r1);
4365 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4367 #define SPEC_wout_r1_8 0
4369 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4371 int r1 = get_field(f, r1);
4372 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4374 #define SPEC_wout_r1_16 0
4376 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4378 store_reg32_i64(get_field(f, r1), o->out);
4380 #define SPEC_wout_r1_32 0
4382 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4384 store_reg32h_i64(get_field(f, r1), o->out);
4386 #define SPEC_wout_r1_32h 0
4388 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4390 int r1 = get_field(f, r1);
4391 store_reg32_i64(r1, o->out);
4392 store_reg32_i64(r1 + 1, o->out2);
4394 #define SPEC_wout_r1_P32 SPEC_r1_even
4396 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4398 int r1 = get_field(f, r1);
4399 store_reg32_i64(r1 + 1, o->out);
4400 tcg_gen_shri_i64(o->out, o->out, 32);
4401 store_reg32_i64(r1, o->out);
4403 #define SPEC_wout_r1_D32 SPEC_r1_even
4405 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4407 store_freg32_i64(get_field(f, r1), o->out);
4409 #define SPEC_wout_e1 0
4411 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4413 store_freg(get_field(f, r1), o->out);
4415 #define SPEC_wout_f1 0
4417 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4419 int f1 = get_field(s->fields, r1);
4420 store_freg(f1, o->out);
4421 store_freg(f1 + 2, o->out2);
4423 #define SPEC_wout_x1 SPEC_r1_f128
4425 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4427 if (get_field(f, r1) != get_field(f, r2)) {
4428 store_reg32_i64(get_field(f, r1), o->out);
4431 #define SPEC_wout_cond_r1r2_32 0
4433 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4435 if (get_field(f, r1) != get_field(f, r2)) {
4436 store_freg32_i64(get_field(f, r1), o->out);
4439 #define SPEC_wout_cond_e1e2 0
4441 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4443 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4445 #define SPEC_wout_m1_8 0
4447 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4449 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4451 #define SPEC_wout_m1_16 0
4453 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4455 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4457 #define SPEC_wout_m1_32 0
4459 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4461 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4463 #define SPEC_wout_m1_64 0
4465 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4467 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4469 #define SPEC_wout_m2_32 0
4471 static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4473 /* XXX release reservation */
4474 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4475 store_reg32_i64(get_field(f, r1), o->in2);
4477 #define SPEC_wout_m2_32_r1_atomic 0
4479 static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4481 /* XXX release reservation */
4482 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4483 store_reg(get_field(f, r1), o->in2);
4485 #define SPEC_wout_m2_64_r1_atomic 0
4487 /* ====================================================================== */
4488 /* The "INput 1" generators. These load the first operand to an insn. */
4490 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4492 o->in1 = load_reg(get_field(f, r1));
4494 #define SPEC_in1_r1 0
4496 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4498 o->in1 = regs[get_field(f, r1)];
4499 o->g_in1 = true;
4501 #define SPEC_in1_r1_o 0
4503 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4505 o->in1 = tcg_temp_new_i64();
4506 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4508 #define SPEC_in1_r1_32s 0
4510 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4512 o->in1 = tcg_temp_new_i64();
4513 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4515 #define SPEC_in1_r1_32u 0
4517 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4519 o->in1 = tcg_temp_new_i64();
4520 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4522 #define SPEC_in1_r1_sr32 0
4524 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4526 o->in1 = load_reg(get_field(f, r1) + 1);
4528 #define SPEC_in1_r1p1 SPEC_r1_even
4530 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4532 o->in1 = tcg_temp_new_i64();
4533 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4535 #define SPEC_in1_r1p1_32s SPEC_r1_even
4537 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4539 o->in1 = tcg_temp_new_i64();
4540 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4542 #define SPEC_in1_r1p1_32u SPEC_r1_even
4544 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4546 int r1 = get_field(f, r1);
4547 o->in1 = tcg_temp_new_i64();
4548 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4550 #define SPEC_in1_r1_D32 SPEC_r1_even
4552 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4554 o->in1 = load_reg(get_field(f, r2));
4556 #define SPEC_in1_r2 0
4558 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4560 o->in1 = tcg_temp_new_i64();
4561 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4563 #define SPEC_in1_r2_sr32 0
4565 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4567 o->in1 = load_reg(get_field(f, r3));
4569 #define SPEC_in1_r3 0
4571 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4573 o->in1 = regs[get_field(f, r3)];
4574 o->g_in1 = true;
4576 #define SPEC_in1_r3_o 0
4578 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4580 o->in1 = tcg_temp_new_i64();
4581 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4583 #define SPEC_in1_r3_32s 0
4585 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4587 o->in1 = tcg_temp_new_i64();
4588 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4590 #define SPEC_in1_r3_32u 0
4592 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4594 int r3 = get_field(f, r3);
4595 o->in1 = tcg_temp_new_i64();
4596 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4598 #define SPEC_in1_r3_D32 SPEC_r3_even
4600 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4602 o->in1 = load_freg32_i64(get_field(f, r1));
4604 #define SPEC_in1_e1 0
4606 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4608 o->in1 = fregs[get_field(f, r1)];
4609 o->g_in1 = true;
4611 #define SPEC_in1_f1_o 0
4613 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4615 int r1 = get_field(f, r1);
4616 o->out = fregs[r1];
4617 o->out2 = fregs[r1 + 2];
4618 o->g_out = o->g_out2 = true;
4620 #define SPEC_in1_x1_o SPEC_r1_f128
4622 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4624 o->in1 = fregs[get_field(f, r3)];
4625 o->g_in1 = true;
4627 #define SPEC_in1_f3_o 0
4629 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4631 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4633 #define SPEC_in1_la1 0
4635 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4637 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4638 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4640 #define SPEC_in1_la2 0
4642 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4644 in1_la1(s, f, o);
4645 o->in1 = tcg_temp_new_i64();
4646 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4648 #define SPEC_in1_m1_8u 0
4650 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4652 in1_la1(s, f, o);
4653 o->in1 = tcg_temp_new_i64();
4654 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4656 #define SPEC_in1_m1_16s 0
4658 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4660 in1_la1(s, f, o);
4661 o->in1 = tcg_temp_new_i64();
4662 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4664 #define SPEC_in1_m1_16u 0
4666 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4668 in1_la1(s, f, o);
4669 o->in1 = tcg_temp_new_i64();
4670 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4672 #define SPEC_in1_m1_32s 0
4674 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4676 in1_la1(s, f, o);
4677 o->in1 = tcg_temp_new_i64();
4678 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4680 #define SPEC_in1_m1_32u 0
4682 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4684 in1_la1(s, f, o);
4685 o->in1 = tcg_temp_new_i64();
4686 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4688 #define SPEC_in1_m1_64 0
4690 /* ====================================================================== */
4691 /* The "INput 2" generators. These load the second operand to an insn. */
4693 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4695 o->in2 = regs[get_field(f, r1)];
4696 o->g_in2 = true;
4698 #define SPEC_in2_r1_o 0
4700 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4702 o->in2 = tcg_temp_new_i64();
4703 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4705 #define SPEC_in2_r1_16u 0
4707 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4709 o->in2 = tcg_temp_new_i64();
4710 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4712 #define SPEC_in2_r1_32u 0
4714 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4716 int r1 = get_field(f, r1);
4717 o->in2 = tcg_temp_new_i64();
4718 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4720 #define SPEC_in2_r1_D32 SPEC_r1_even
4722 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4724 o->in2 = load_reg(get_field(f, r2));
4726 #define SPEC_in2_r2 0
4728 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4730 o->in2 = regs[get_field(f, r2)];
4731 o->g_in2 = true;
4733 #define SPEC_in2_r2_o 0
4735 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4737 int r2 = get_field(f, r2);
4738 if (r2 != 0) {
4739 o->in2 = load_reg(r2);
4742 #define SPEC_in2_r2_nz 0
4744 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4746 o->in2 = tcg_temp_new_i64();
4747 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4749 #define SPEC_in2_r2_8s 0
4751 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4753 o->in2 = tcg_temp_new_i64();
4754 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4756 #define SPEC_in2_r2_8u 0
4758 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4760 o->in2 = tcg_temp_new_i64();
4761 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4763 #define SPEC_in2_r2_16s 0
4765 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4767 o->in2 = tcg_temp_new_i64();
4768 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4770 #define SPEC_in2_r2_16u 0
4772 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4774 o->in2 = load_reg(get_field(f, r3));
4776 #define SPEC_in2_r3 0
4778 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4780 o->in2 = tcg_temp_new_i64();
4781 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4783 #define SPEC_in2_r3_sr32 0
4785 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4787 o->in2 = tcg_temp_new_i64();
4788 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4790 #define SPEC_in2_r2_32s 0
4792 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4794 o->in2 = tcg_temp_new_i64();
4795 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4797 #define SPEC_in2_r2_32u 0
4799 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4801 o->in2 = tcg_temp_new_i64();
4802 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4804 #define SPEC_in2_r2_sr32 0
4806 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4808 o->in2 = load_freg32_i64(get_field(f, r2));
4810 #define SPEC_in2_e2 0
4812 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4814 o->in2 = fregs[get_field(f, r2)];
4815 o->g_in2 = true;
4817 #define SPEC_in2_f2_o 0
4819 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4821 int r2 = get_field(f, r2);
4822 o->in1 = fregs[r2];
4823 o->in2 = fregs[r2 + 2];
4824 o->g_in1 = o->g_in2 = true;
4826 #define SPEC_in2_x2_o SPEC_r2_f128
4828 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4830 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4832 #define SPEC_in2_ra2 0
4834 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4836 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4837 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4839 #define SPEC_in2_a2 0
4841 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4843 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4845 #define SPEC_in2_ri2 0
4847 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4849 help_l2_shift(s, f, o, 31);
4851 #define SPEC_in2_sh32 0
4853 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4855 help_l2_shift(s, f, o, 63);
4857 #define SPEC_in2_sh64 0
/* The in2_m2_<size> helpers compute the a2 effective address (see in2_a2)
   and then overwrite o->in2 with the value loaded from that address.  The
   suffix gives the access width and extension: u = zero-extend,
   s = sign-extend, 64 = full doubleword.  */

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_m2_64 0
/* The in2_mri2_<size> helpers are like in2_m2_<size> but use the
   PC-relative address computed by in2_ri2 instead of a base+displacement
   address (relative-long memory operands).  */

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
#define SPEC_in2_mri2_64 0
/* Load-for-update variants: the effective address is kept in o->addr1
   (via in1_la2) so the store half of the insn can reuse it, and the old
   value is loaded into a fresh temp in o->in2.  */

static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
#define SPEC_in2_m2_32s_atomic 0

static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
#define SPEC_in2_m2_64_atomic 0
/* in2 <- the i2 immediate, sign-extended to 64 bits.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
    o->in2 = tcg_const_i64(get_field(f, i2));
#define SPEC_in2_i2 0

/* in2 <- the low 8 bits of i2, zero-extended.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
#define SPEC_in2_i2_8u 0

/* in2 <- the low 16 bits of i2, zero-extended.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
#define SPEC_in2_i2_16u 0

/* in2 <- the low 32 bits of i2, zero-extended.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
#define SPEC_in2_i2_32u 0

/* in2 <- i2 zero-extended to 16 bits and shifted left by the per-insn
   constant stored in insn->data.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
#define SPEC_in2_i2_16u_shl 0

/* As above, but with a 32-bit zero-extension before the shift.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
#define SPEC_in2_i2_32u_shl 0
#ifndef CONFIG_USER_ONLY
/* in2 <- the raw (left-aligned) instruction bits themselves, as stashed
   by extract_insn.  System-mode only.  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
    o->in2 = tcg_const_i64(s->fields->raw_insn);
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table. */

/* C is D with the extra data field defaulted to 0.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn, giving each
   a stable index into insn_info[] below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
#undef D

/* Second expansion: one DisasInsn descriptor initializer per insn,
   wiring up the operand/prep/writeback/cc helper functions by name.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {  \
    .opc = OPC,                                        \
    .fmt = FMT_##FT,                                   \
    .fac = FAC_##FC,                                   \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM,                                       \
    .help_in1 = in1_##I1,                              \
    .help_in2 = in2_##I2,                              \
    .help_prep = prep_##P,                             \
    .help_wout = wout_##W,                             \
    .help_cout = cout_##CC,                            \
    .help_op = op_##OP,                                \
    .data = D                                          \

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* The descriptor table itself, indexed by the enum above.  */
static const DisasInsn insn_info[] = {
#include "insn-data.def"

/* Third expansion: one switch case per insn for lookup_opc below.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];
/* Map a combined 16-bit (major << 8 | minor) opcode to its insn_info
   descriptor; returns NULL for unimplemented/invalid opcodes.  */
static const DisasInsn *lookup_opc(uint16_t opc)
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;

#undef D
#undef C
/* Extract a field from the insn. The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation. */
/* Extract one operand field F from the left-aligned instruction word INSN
   into the compressed operand slot O->c[f->indexC], recording which
   compressed (presentC) and original (presentO) fields are populated.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
    uint32_t r, m;

    /* A zero-sized field means the operand is absent from this format.  */
    if (f->size == 0) {
        return;

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;  /* sign-extend via the xor/subtract trick */
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble: dh (high 8 bits, sign-extended) above dl (low 12).  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
5098 /* Lookup the insn at the current PC, extracting the operands into O and
5099 returning the info struct for the insn. Returns NULL for invalid insn. */
/* Fetch and decode the insn at S->pc: determine its length, left-align
   the full encoding in a 64-bit word, locate the secondary opcode, look
   up the descriptor, and extract the operand fields into F.  Also sets
   S->next_pc.  Returns NULL for an unknown opcode.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword's major opcode byte determines the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full encoding in INSN (see extract_field).  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;

    /* Reset the fields and stash the raw encoding for in2_insn.  */
    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);

    return info;
/* Translate the single insn at S->pc.  Decodes via extract_insn, checks
   the descriptor's specification-exception constraints, then drives the
   helper pipeline (in1/in2/prep/op/wout/cout) and frees any non-global
   temporaries.  Advances S->pc and returns the resulting ExitStatus.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;

#ifndef CONFIG_USER_ONLY
    /* PER instruction-fetch tracing: report the fetch address.  */
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
#endif

    /* Check for insn specification exceptions: even register pairs and
       valid 128-bit float register numbers (r <= 13), per SPEC flags.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    if (insn->help_cout) {
        insn->help_cout(s, &o);

    /* Free any temporaries created by the helpers; g_* marks globals
       that must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);

        /* Save off cc. */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
#endif

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
5321 void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
5323 S390CPU *cpu = s390_env_get_cpu(env);
5324 CPUState *cs = CPU(cpu);
5325 DisasContext dc;
5326 target_ulong pc_start;
5327 uint64_t next_page_start;
5328 int num_insns, max_insns;
5329 ExitStatus status;
5330 bool do_debug;
5332 pc_start = tb->pc;
5334 /* 31-bit mode */
5335 if (!(tb->flags & FLAG_MASK_64)) {
5336 pc_start &= 0x7fffffff;
5339 dc.tb = tb;
5340 dc.pc = pc_start;
5341 dc.cc_op = CC_OP_DYNAMIC;
5342 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5344 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5346 num_insns = 0;
5347 max_insns = tb->cflags & CF_COUNT_MASK;
5348 if (max_insns == 0) {
5349 max_insns = CF_COUNT_MASK;
5351 if (max_insns > TCG_MAX_INSNS) {
5352 max_insns = TCG_MAX_INSNS;
5355 gen_tb_start(tb);
5357 do {
5358 tcg_gen_insn_start(dc.pc, dc.cc_op);
5359 num_insns++;
5361 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5362 status = EXIT_PC_STALE;
5363 do_debug = true;
5364 /* The address covered by the breakpoint must be included in
5365 [tb->pc, tb->pc + tb->size) in order to for it to be
5366 properly cleared -- thus we increment the PC here so that
5367 the logic setting tb->size below does the right thing. */
5368 dc.pc += 2;
5369 break;
5372 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5373 gen_io_start();
5376 status = NO_EXIT;
5377 if (status == NO_EXIT) {
5378 status = translate_one(env, &dc);
5381 /* If we reach a page boundary, are single stepping,
5382 or exhaust instruction count, stop generation. */
5383 if (status == NO_EXIT
5384 && (dc.pc >= next_page_start
5385 || tcg_op_buf_full()
5386 || num_insns >= max_insns
5387 || singlestep
5388 || cs->singlestep_enabled)) {
5389 status = EXIT_PC_STALE;
5391 } while (status == NO_EXIT);
5393 if (tb->cflags & CF_LAST_IO) {
5394 gen_io_end();
5397 switch (status) {
5398 case EXIT_GOTO_TB:
5399 case EXIT_NORETURN:
5400 break;
5401 case EXIT_PC_STALE:
5402 update_psw_addr(&dc);
5403 /* FALLTHRU */
5404 case EXIT_PC_UPDATED:
5405 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5406 cc op type is in env */
5407 update_cc_op(&dc);
5408 /* Exit the TB, either by raising a debug exception or by return. */
5409 if (do_debug) {
5410 gen_exception(EXCP_DEBUG);
5411 } else {
5412 tcg_gen_exit_tb(0);
5414 break;
5415 default:
5416 abort();
5419 gen_tb_end(tb, num_insns);
5421 tb->size = dc.pc - pc_start;
5422 tb->icount = num_insns;
5424 #if defined(S390X_DEBUG_DISAS)
5425 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5426 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5427 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5428 qemu_log("\n");
5430 #endif
5433 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5434 target_ulong *data)
5436 int cc_op = data[1];
5437 env->psw.addr = data[0];
5438 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5439 env->cc_op = cc_op;