target-alpha: make cpu-qom.h not target specific
[qemu/ar7.git] / target-s390x / translate.c
blobe99eb5cb01699d5b7e43d08ec23ad8e0df175b4e
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS only produces output when verbose disassembly debugging
   is compiled in; otherwise it expands to a no-op statement.  */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "tcg-op.h"
35 #include "qemu/log.h"
36 #include "qemu/host-utils.h"
37 #include "exec/cpu_ldst.h"
/* global register indexes */
/* TCG handle for the CPU environment pointer (CPUS390XState *).  */
static TCGv_env cpu_env;
42 #include "exec/gen-icount.h"
43 #include "exec/helper-proto.h"
44 #include "exec/helper-gen.h"
46 #include "trace-tcg.h"
47 #include "exec/log.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;    /* TB being translated */
    const DisasInsn *insn;          /* decode-table entry for current insn */
    DisasFields *fields;            /* decoded operand fields */
    uint64_t pc, next_pc;           /* guest PC of current and next insn */
    enum cc_op cc_op;               /* how the condition code is computed */
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;                     /* use the s64 member of the union */
    bool g1;                        /* a is a global temp; do not free */
    bool g2;                        /* b is a global temp; do not free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#define DISAS_EXCP 4

/* Counters for how often a conditional branch could (hit) or could not
   (miss) be inlined as a TCG comparison; dumped from cpu_dump_state.  */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
/* Form the link-register value for a branch-and-link at PC: in 31-bit
   addressing mode the high bit carries the addressing mode, in 24- and
   64-bit modes the address is used unchanged.  */
static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }
    return pc;
}
/* Dump the architectural CPU state (PSW, GPRs, FPRs, VRs and, for system
   emulation, control registers) to F for the 'info registers' monitor
   command and debug logging.  */
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    /* cc_op values above 3 are symbolic "how to compute cc" states,
       not architectural cc values; print their name instead.  */
    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    /* Each vector register is 128 bits, printed as two 64-bit halves.  */
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
                    env->vregs[i][0].ll, env->vregs[i][1].ll);
        cpu_fprintf(f, (i % 2) ? "\n" : " ");
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}
/* TCG globals mirroring fields of CPUS390XState.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;       /* PER breaking-event address */

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* 16 "rN" plus 16 "fN" names, each at most 3 chars + NUL.  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* One-time creation of the TCG global temps above; called at CPU
   initialization before any translation takes place.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* FPRs overlap the first doubleword of the vector registers.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
/* Return a fresh temp holding a copy of GPR REG.  Caller frees.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temp with the 32-bit float value of FPR REG in the
   low half (FPRs keep short floats in their high 32 bits).  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Store V into GPR REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into FPR REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of GPR REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a short float (low 32 bits of V) into FPR REG's high half.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper return from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
/* Sync the architectural PSW address with the translator's current PC.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Record an unconditional branch for PER: update the breaking-event
   address and, if PER is active, call the per_branch helper.  TO_NEXT
   selects the fall-through address as destination (psw_addr otherwise).  */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

/* As per_branch, but only when ARG1 <cond> ARG2 holds at runtime.
   Without PER active only gbea needs a conditional update.  */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

/* Record the current PC as the PER breaking-event address.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

/* Flush the lazily-tracked cc_op to env when it holds a constant value
   (dynamic and static states already live in env).  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Sync PSW address and cc before an operation that may fault, so the
   exception path sees consistent state.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
/* Fetch a 2-byte instruction halfword at PC, zero-extended.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction word at PC, zero-extended.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Map the PSW address-space-control bits to a softmmu MMU index.  */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not implemented here.  */
        tcg_abort();
        break;
    }
}
/* Raise QEMU exception EXCP via the exception helper (does not return
   to generated code).  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise a program exception with program-interruption code CODE,
   storing the code and instruction length for the interrupt handler.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

/* Shorthand: the operation exception raised for illegal opcodes.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with DXC 0xff (set in the FPC register).  */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when translating in
   problem (user) state.  */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a fresh temp, masking to 31 bits unless in 64-bit mode.
   Register number 0 means "no base/index".  Caller frees the temp.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
/* True when cc_src/cc_dst/cc_vr currently carry data needed to compute
   the cc later (i.e. cc is tracked lazily, not constant/static).  */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

/* Set the cc to the constant VAL (0..3); discard any stale cc inputs
   so TCG can drop dead computations.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Track a cc computed from one operand (stored in cc_dst).  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Track a cc computed from two operands (cc_src, cc_dst).  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Track a cc computed from three operands (cc_src, cc_dst, cc_vr).  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* cc from a 64-bit nonzero test of VAL.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit float sign/zero classification of VAL.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit float sign/zero classification of VAL.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit float classification of the pair VH:VL.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
/* Materialize the lazily tracked condition code: emit a call to the
   calc_cc helper with however many of cc_src/cc_dst/cc_vr the current
   cc_op needs, then mark the cc as statically known (CC_OP_STATIC).  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);

    /* First pass: allocate the temps the helper call will need.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed, or cc_op is read from env.  */
        break;
    }

    /* Second pass: emit the computation itself.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
/* Decide whether a direct TB-to-TB link (goto_tb) may be used for a
   branch to DEST.  Disallowed under single-stepping, after the last-IO
   instruction, and when PER tracing is active; for system emulation the
   destination must also stay on one of the current guest pages.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->singlestep_enabled) ||
        (s->tb->cflags & CF_LAST_IO) ||
        (s->tb->flags & FLAG_MASK_PER)) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

/* Statistics hook: a branch on CC_OP could not be inlined.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Statistics hook: a branch on CC_OP was inlined as a TCG compare.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because bit 0 (CC=3) is a don't-care.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Falls back to
   materializing the cc (do_dynamic) when the mask cannot be expressed
   as a single comparison on the tracked cc inputs.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial cases: branch always / branch never.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already computed; compare it against MASK
           bit patterns directly where a single compare suffices.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
/* Release the temps held by C, unless flagged as globals (g1/g2).  */
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  The F0..F5 macros expand each
   entry of insn-format.def to an FMT_<name> enumerator; the operand
   field arguments are ignored here.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact indexes: fields that never co-occur share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
/* Holder for the decoded operand fields of one instruction.  */
struct DisasFields {
    uint64_t raw_insn;          /* raw instruction bytes, left-aligned */
    unsigned op:8;              /* primary opcode */
    unsigned op2:8;             /* secondary opcode, if any */
    unsigned presentC:16;       /* bitmap of filled compact slots */
    unsigned int presentO;      /* bitmap of present original fields */
    int c[NUM_C_FIELD];         /* decoded values, compact-indexed */
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test the original-index presence bitmap for field C.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Read compact slot C, asserting that original field O was decoded.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width in bits */
    unsigned int type:2;        /* 0=unsigned, 1=signed, 2=displacement */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original field identity */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Field constructors used by insn-format.def: registers, masks,
   base+displacement pairs (12- or 20-bit), index forms, immediates
   and lengths.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* Per-format field layout table, indexed by DisasFormat.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* global temps: do not free */
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* Bit flags: register operands must be even-numbered or valid f128 pairs.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* Architectural facility an instruction belongs to; used to gate
   decoding on the modelled CPU's capabilities.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
/* One entry of the instruction decode table: opcode, format, required
   facility, operand constraints, and the pipeline of helper callbacks
   (load inputs, prepare output, emit op, compute cc, write back).  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    DisasFacility fac:8;
    unsigned spec:8;        /* SPEC_* operand constraint flags */

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* extra per-insn payload for the helpers */
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

/* Compute a shift amount operand into o->in2: either the immediate
   displacement, or base register + displacement, masked to MASK
   (e.g. 31 or 63 significant bits).  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
/* Emit an unconditional branch to DEST: fall through when DEST is the
   next instruction, chain TBs with goto_tb when allowed, otherwise
   set psw_addr and exit to the main loop.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
1252 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1253 bool is_imm, int imm, TCGv_i64 cdest)
1255 ExitStatus ret;
1256 uint64_t dest = s->pc + 2 * imm;
1257 TCGLabel *lab;
1259 /* Take care of the special cases first. */
1260 if (c->cond == TCG_COND_NEVER) {
1261 ret = NO_EXIT;
1262 goto egress;
1264 if (is_imm) {
1265 if (dest == s->next_pc) {
1266 /* Branch to next. */
1267 per_branch(s, true);
1268 ret = NO_EXIT;
1269 goto egress;
1271 if (c->cond == TCG_COND_ALWAYS) {
1272 ret = help_goto_direct(s, dest);
1273 goto egress;
1275 } else {
1276 if (TCGV_IS_UNUSED_I64(cdest)) {
1277 /* E.g. bcr %r0 -> no branch. */
1278 ret = NO_EXIT;
1279 goto egress;
1281 if (c->cond == TCG_COND_ALWAYS) {
1282 tcg_gen_mov_i64(psw_addr, cdest);
1283 per_branch(s, false);
1284 ret = EXIT_PC_UPDATED;
1285 goto egress;
1289 if (use_goto_tb(s, s->next_pc)) {
1290 if (is_imm && use_goto_tb(s, dest)) {
1291 /* Both exits can use goto_tb. */
1292 update_cc_op(s);
1294 lab = gen_new_label();
1295 if (c->is_64) {
1296 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1297 } else {
1298 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1301 /* Branch not taken. */
1302 tcg_gen_goto_tb(0);
1303 tcg_gen_movi_i64(psw_addr, s->next_pc);
1304 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1306 /* Branch taken. */
1307 gen_set_label(lab);
1308 per_breaking_event(s);
1309 tcg_gen_goto_tb(1);
1310 tcg_gen_movi_i64(psw_addr, dest);
1311 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1313 ret = EXIT_GOTO_TB;
1314 } else {
1315 /* Fallthru can use goto_tb, but taken branch cannot. */
1316 /* Store taken branch destination before the brcond. This
1317 avoids having to allocate a new local temp to hold it.
1318 We'll overwrite this in the not taken case anyway. */
1319 if (!is_imm) {
1320 tcg_gen_mov_i64(psw_addr, cdest);
1323 lab = gen_new_label();
1324 if (c->is_64) {
1325 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1326 } else {
1327 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1330 /* Branch not taken. */
1331 update_cc_op(s);
1332 tcg_gen_goto_tb(0);
1333 tcg_gen_movi_i64(psw_addr, s->next_pc);
1334 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1336 gen_set_label(lab);
1337 if (is_imm) {
1338 tcg_gen_movi_i64(psw_addr, dest);
1340 per_breaking_event(s);
1341 ret = EXIT_PC_UPDATED;
1343 } else {
1344 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1345 Most commonly we're single-stepping or some other condition that
1346 disables all use of goto_tb. Just update the PC and exit. */
1348 TCGv_i64 next = tcg_const_i64(s->next_pc);
1349 if (is_imm) {
1350 cdest = tcg_const_i64(dest);
1353 if (c->is_64) {
1354 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1355 cdest, next);
1356 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1357 } else {
1358 TCGv_i32 t0 = tcg_temp_new_i32();
1359 TCGv_i64 t1 = tcg_temp_new_i64();
1360 TCGv_i64 z = tcg_const_i64(0);
1361 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1362 tcg_gen_extu_i32_i64(t1, t0);
1363 tcg_temp_free_i32(t0);
1364 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1365 per_branch_cond(s, TCG_COND_NE, t1, z);
1366 tcg_temp_free_i64(t1);
1367 tcg_temp_free_i64(z);
1370 if (is_imm) {
1371 tcg_temp_free_i64(cdest);
1373 tcg_temp_free_i64(next);
1375 ret = EXIT_PC_UPDATED;
1378 egress:
1379 free_compare(c);
1380 return ret;
1383 /* ====================================================================== */
1384 /* The operations. These perform the bulk of the work for any insn,
1385 usually after the operands have been loaded and output initialized. */
1387 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1389 TCGv_i64 z, n;
1390 z = tcg_const_i64(0);
1391 n = tcg_temp_new_i64();
1392 tcg_gen_neg_i64(n, o->in2);
1393 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1394 tcg_temp_free_i64(n);
1395 tcg_temp_free_i64(z);
1396 return NO_EXIT;
1399 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1401 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1402 return NO_EXIT;
1405 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1407 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1408 return NO_EXIT;
1411 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1413 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1414 tcg_gen_mov_i64(o->out2, o->in2);
1415 return NO_EXIT;
1418 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1420 tcg_gen_add_i64(o->out, o->in1, o->in2);
1421 return NO_EXIT;
1424 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1426 DisasCompare cmp;
1427 TCGv_i64 carry;
1429 tcg_gen_add_i64(o->out, o->in1, o->in2);
1431 /* The carry flag is the msb of CC, therefore the branch mask that would
1432 create that comparison is 3. Feeding the generated comparison to
1433 setcond produces the carry flag that we desire. */
1434 disas_jcc(s, &cmp, 3);
1435 carry = tcg_temp_new_i64();
1436 if (cmp.is_64) {
1437 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1438 } else {
1439 TCGv_i32 t = tcg_temp_new_i32();
1440 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1441 tcg_gen_extu_i32_i64(carry, t);
1442 tcg_temp_free_i32(t);
1444 free_compare(&cmp);
1446 tcg_gen_add_i64(o->out, o->out, carry);
1447 tcg_temp_free_i64(carry);
1448 return NO_EXIT;
1451 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1453 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1454 return NO_EXIT;
1457 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1459 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1460 return NO_EXIT;
1463 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1465 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1466 return_low128(o->out2);
1467 return NO_EXIT;
1470 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1472 tcg_gen_and_i64(o->out, o->in1, o->in2);
1473 return NO_EXIT;
1476 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1478 int shift = s->insn->data & 0xff;
1479 int size = s->insn->data >> 8;
1480 uint64_t mask = ((1ull << size) - 1) << shift;
1482 assert(!o->g_in2);
1483 tcg_gen_shli_i64(o->in2, o->in2, shift);
1484 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1485 tcg_gen_and_i64(o->out, o->in1, o->in2);
1487 /* Produce the CC from only the bits manipulated. */
1488 tcg_gen_andi_i64(cc_dst, o->out, mask);
1489 set_cc_nz_u64(s, cc_dst);
1490 return NO_EXIT;
1493 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1495 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1496 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1497 tcg_gen_mov_i64(psw_addr, o->in2);
1498 per_branch(s, false);
1499 return EXIT_PC_UPDATED;
1500 } else {
1501 return NO_EXIT;
1505 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1507 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1508 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1511 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1513 int m1 = get_field(s->fields, m1);
1514 bool is_imm = have_field(s->fields, i2);
1515 int imm = is_imm ? get_field(s->fields, i2) : 0;
1516 DisasCompare c;
1518 disas_jcc(s, &c, m1);
1519 return help_branch(s, &c, is_imm, imm, o->in2);
1522 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1524 int r1 = get_field(s->fields, r1);
1525 bool is_imm = have_field(s->fields, i2);
1526 int imm = is_imm ? get_field(s->fields, i2) : 0;
1527 DisasCompare c;
1528 TCGv_i64 t;
1530 c.cond = TCG_COND_NE;
1531 c.is_64 = false;
1532 c.g1 = false;
1533 c.g2 = false;
1535 t = tcg_temp_new_i64();
1536 tcg_gen_subi_i64(t, regs[r1], 1);
1537 store_reg32_i64(r1, t);
1538 c.u.s32.a = tcg_temp_new_i32();
1539 c.u.s32.b = tcg_const_i32(0);
1540 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1541 tcg_temp_free_i64(t);
1543 return help_branch(s, &c, is_imm, imm, o->in2);
1546 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1548 int r1 = get_field(s->fields, r1);
1549 int imm = get_field(s->fields, i2);
1550 DisasCompare c;
1551 TCGv_i64 t;
1553 c.cond = TCG_COND_NE;
1554 c.is_64 = false;
1555 c.g1 = false;
1556 c.g2 = false;
1558 t = tcg_temp_new_i64();
1559 tcg_gen_shri_i64(t, regs[r1], 32);
1560 tcg_gen_subi_i64(t, t, 1);
1561 store_reg32h_i64(r1, t);
1562 c.u.s32.a = tcg_temp_new_i32();
1563 c.u.s32.b = tcg_const_i32(0);
1564 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1565 tcg_temp_free_i64(t);
1567 return help_branch(s, &c, 1, imm, o->in2);
1570 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1572 int r1 = get_field(s->fields, r1);
1573 bool is_imm = have_field(s->fields, i2);
1574 int imm = is_imm ? get_field(s->fields, i2) : 0;
1575 DisasCompare c;
1577 c.cond = TCG_COND_NE;
1578 c.is_64 = true;
1579 c.g1 = true;
1580 c.g2 = false;
1582 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1583 c.u.s64.a = regs[r1];
1584 c.u.s64.b = tcg_const_i64(0);
1586 return help_branch(s, &c, is_imm, imm, o->in2);
1589 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1591 int r1 = get_field(s->fields, r1);
1592 int r3 = get_field(s->fields, r3);
1593 bool is_imm = have_field(s->fields, i2);
1594 int imm = is_imm ? get_field(s->fields, i2) : 0;
1595 DisasCompare c;
1596 TCGv_i64 t;
1598 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1599 c.is_64 = false;
1600 c.g1 = false;
1601 c.g2 = false;
1603 t = tcg_temp_new_i64();
1604 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1605 c.u.s32.a = tcg_temp_new_i32();
1606 c.u.s32.b = tcg_temp_new_i32();
1607 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1608 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1609 store_reg32_i64(r1, t);
1610 tcg_temp_free_i64(t);
1612 return help_branch(s, &c, is_imm, imm, o->in2);
1615 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1617 int r1 = get_field(s->fields, r1);
1618 int r3 = get_field(s->fields, r3);
1619 bool is_imm = have_field(s->fields, i2);
1620 int imm = is_imm ? get_field(s->fields, i2) : 0;
1621 DisasCompare c;
1623 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1624 c.is_64 = true;
1626 if (r1 == (r3 | 1)) {
1627 c.u.s64.b = load_reg(r3 | 1);
1628 c.g2 = false;
1629 } else {
1630 c.u.s64.b = regs[r3 | 1];
1631 c.g2 = true;
1634 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1635 c.u.s64.a = regs[r1];
1636 c.g1 = true;
1638 return help_branch(s, &c, is_imm, imm, o->in2);
1641 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1643 int imm, m3 = get_field(s->fields, m3);
1644 bool is_imm;
1645 DisasCompare c;
1647 c.cond = ltgt_cond[m3];
1648 if (s->insn->data) {
1649 c.cond = tcg_unsigned_cond(c.cond);
1651 c.is_64 = c.g1 = c.g2 = true;
1652 c.u.s64.a = o->in1;
1653 c.u.s64.b = o->in2;
1655 is_imm = have_field(s->fields, i4);
1656 if (is_imm) {
1657 imm = get_field(s->fields, i4);
1658 } else {
1659 imm = 0;
1660 o->out = get_address(s, 0, get_field(s->fields, b4),
1661 get_field(s->fields, d4));
1664 return help_branch(s, &c, is_imm, imm, o->out);
1667 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1669 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1670 set_cc_static(s);
1671 return NO_EXIT;
1674 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1676 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1677 set_cc_static(s);
1678 return NO_EXIT;
1681 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1683 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1684 set_cc_static(s);
1685 return NO_EXIT;
1688 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1690 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1691 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1692 tcg_temp_free_i32(m3);
1693 gen_set_cc_nz_f32(s, o->in2);
1694 return NO_EXIT;
1697 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1699 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1700 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1701 tcg_temp_free_i32(m3);
1702 gen_set_cc_nz_f64(s, o->in2);
1703 return NO_EXIT;
1706 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1708 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1709 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1710 tcg_temp_free_i32(m3);
1711 gen_set_cc_nz_f128(s, o->in1, o->in2);
1712 return NO_EXIT;
1715 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1717 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1718 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1719 tcg_temp_free_i32(m3);
1720 gen_set_cc_nz_f32(s, o->in2);
1721 return NO_EXIT;
1724 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1726 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1727 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1728 tcg_temp_free_i32(m3);
1729 gen_set_cc_nz_f64(s, o->in2);
1730 return NO_EXIT;
1733 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1735 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1736 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1737 tcg_temp_free_i32(m3);
1738 gen_set_cc_nz_f128(s, o->in1, o->in2);
1739 return NO_EXIT;
1742 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1744 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1745 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1746 tcg_temp_free_i32(m3);
1747 gen_set_cc_nz_f32(s, o->in2);
1748 return NO_EXIT;
1751 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1753 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1754 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1755 tcg_temp_free_i32(m3);
1756 gen_set_cc_nz_f64(s, o->in2);
1757 return NO_EXIT;
1760 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1762 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1763 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1764 tcg_temp_free_i32(m3);
1765 gen_set_cc_nz_f128(s, o->in1, o->in2);
1766 return NO_EXIT;
1769 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1771 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1772 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1773 tcg_temp_free_i32(m3);
1774 gen_set_cc_nz_f32(s, o->in2);
1775 return NO_EXIT;
1778 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1780 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1781 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1782 tcg_temp_free_i32(m3);
1783 gen_set_cc_nz_f64(s, o->in2);
1784 return NO_EXIT;
1787 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1789 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1790 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1791 tcg_temp_free_i32(m3);
1792 gen_set_cc_nz_f128(s, o->in1, o->in2);
1793 return NO_EXIT;
1796 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1798 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1799 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1800 tcg_temp_free_i32(m3);
1801 return NO_EXIT;
1804 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1806 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1807 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1808 tcg_temp_free_i32(m3);
1809 return NO_EXIT;
1812 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1814 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1815 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1816 tcg_temp_free_i32(m3);
1817 return_low128(o->out2);
1818 return NO_EXIT;
1821 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1823 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1824 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1825 tcg_temp_free_i32(m3);
1826 return NO_EXIT;
1829 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1831 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1832 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1833 tcg_temp_free_i32(m3);
1834 return NO_EXIT;
1837 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1839 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1840 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1841 tcg_temp_free_i32(m3);
1842 return_low128(o->out2);
1843 return NO_EXIT;
1846 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1848 int r2 = get_field(s->fields, r2);
1849 TCGv_i64 len = tcg_temp_new_i64();
1851 potential_page_fault(s);
1852 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1853 set_cc_static(s);
1854 return_low128(o->out);
1856 tcg_gen_add_i64(regs[r2], regs[r2], len);
1857 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1858 tcg_temp_free_i64(len);
1860 return NO_EXIT;
1863 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1865 int l = get_field(s->fields, l1);
1866 TCGv_i32 vl;
1868 switch (l + 1) {
1869 case 1:
1870 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1871 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1872 break;
1873 case 2:
1874 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1875 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1876 break;
1877 case 4:
1878 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1879 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1880 break;
1881 case 8:
1882 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1883 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1884 break;
1885 default:
1886 potential_page_fault(s);
1887 vl = tcg_const_i32(l);
1888 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1889 tcg_temp_free_i32(vl);
1890 set_cc_static(s);
1891 return NO_EXIT;
1893 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1894 return NO_EXIT;
1897 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1899 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1900 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1901 potential_page_fault(s);
1902 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1903 tcg_temp_free_i32(r1);
1904 tcg_temp_free_i32(r3);
1905 set_cc_static(s);
1906 return NO_EXIT;
1909 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1911 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1912 TCGv_i32 t1 = tcg_temp_new_i32();
1913 tcg_gen_extrl_i64_i32(t1, o->in1);
1914 potential_page_fault(s);
1915 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1916 set_cc_static(s);
1917 tcg_temp_free_i32(t1);
1918 tcg_temp_free_i32(m3);
1919 return NO_EXIT;
1922 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1924 potential_page_fault(s);
1925 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1926 set_cc_static(s);
1927 return_low128(o->in2);
1928 return NO_EXIT;
1931 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1933 TCGv_i64 t = tcg_temp_new_i64();
1934 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1935 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1936 tcg_gen_or_i64(o->out, o->out, t);
1937 tcg_temp_free_i64(t);
1938 return NO_EXIT;
1941 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1943 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1944 int d2 = get_field(s->fields, d2);
1945 int b2 = get_field(s->fields, b2);
1946 int is_64 = s->insn->data;
1947 TCGv_i64 addr, mem, cc, z;
1949 /* Note that in1 = R3 (new value) and
1950 in2 = (zero-extended) R1 (expected value). */
1952 /* Load the memory into the (temporary) output. While the PoO only talks
1953 about moving the memory to R1 on inequality, if we include equality it
1954 means that R1 is equal to the memory in all conditions. */
1955 addr = get_address(s, 0, b2, d2);
1956 if (is_64) {
1957 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1958 } else {
1959 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1962 /* Are the memory and expected values (un)equal? Note that this setcond
1963 produces the output CC value, thus the NE sense of the test. */
1964 cc = tcg_temp_new_i64();
1965 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1967 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1968 Recall that we are allowed to unconditionally issue the store (and
1969 thus any possible write trap), so (re-)store the original contents
1970 of MEM in case of inequality. */
1971 z = tcg_const_i64(0);
1972 mem = tcg_temp_new_i64();
1973 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1974 if (is_64) {
1975 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1976 } else {
1977 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1979 tcg_temp_free_i64(z);
1980 tcg_temp_free_i64(mem);
1981 tcg_temp_free_i64(addr);
1983 /* Store CC back to cc_op. Wait until after the store so that any
1984 exception gets the old cc_op value. */
1985 tcg_gen_extrl_i64_i32(cc_op, cc);
1986 tcg_temp_free_i64(cc);
1987 set_cc_static(s);
1988 return NO_EXIT;
1991 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1993 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1994 int r1 = get_field(s->fields, r1);
1995 int r3 = get_field(s->fields, r3);
1996 int d2 = get_field(s->fields, d2);
1997 int b2 = get_field(s->fields, b2);
1998 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
2000 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2002 addrh = get_address(s, 0, b2, d2);
2003 addrl = get_address(s, 0, b2, d2 + 8);
2004 outh = tcg_temp_new_i64();
2005 outl = tcg_temp_new_i64();
2007 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
2008 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
2010 /* Fold the double-word compare with arithmetic. */
2011 cc = tcg_temp_new_i64();
2012 z = tcg_temp_new_i64();
2013 tcg_gen_xor_i64(cc, outh, regs[r1]);
2014 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
2015 tcg_gen_or_i64(cc, cc, z);
2016 tcg_gen_movi_i64(z, 0);
2017 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
2019 memh = tcg_temp_new_i64();
2020 meml = tcg_temp_new_i64();
2021 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
2022 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
2023 tcg_temp_free_i64(z);
2025 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
2026 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
2027 tcg_temp_free_i64(memh);
2028 tcg_temp_free_i64(meml);
2029 tcg_temp_free_i64(addrh);
2030 tcg_temp_free_i64(addrl);
2032 /* Save back state now that we've passed all exceptions. */
2033 tcg_gen_mov_i64(regs[r1], outh);
2034 tcg_gen_mov_i64(regs[r1 + 1], outl);
2035 tcg_gen_extrl_i64_i32(cc_op, cc);
2036 tcg_temp_free_i64(outh);
2037 tcg_temp_free_i64(outl);
2038 tcg_temp_free_i64(cc);
2039 set_cc_static(s);
2040 return NO_EXIT;
2043 #ifndef CONFIG_USER_ONLY
2044 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2046 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2047 check_privileged(s);
2048 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
2049 tcg_temp_free_i32(r1);
2050 set_cc_static(s);
2051 return NO_EXIT;
2053 #endif
2055 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2057 TCGv_i64 t1 = tcg_temp_new_i64();
2058 TCGv_i32 t2 = tcg_temp_new_i32();
2059 tcg_gen_extrl_i64_i32(t2, o->in1);
2060 gen_helper_cvd(t1, t2);
2061 tcg_temp_free_i32(t2);
2062 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2063 tcg_temp_free_i64(t1);
2064 return NO_EXIT;
2067 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2069 int m3 = get_field(s->fields, m3);
2070 TCGLabel *lab = gen_new_label();
2071 TCGCond c;
2073 c = tcg_invert_cond(ltgt_cond[m3]);
2074 if (s->insn->data) {
2075 c = tcg_unsigned_cond(c);
2077 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2079 /* Trap. */
2080 gen_trap(s);
2082 gen_set_label(lab);
2083 return NO_EXIT;
2086 #ifndef CONFIG_USER_ONLY
2087 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2089 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2090 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2091 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2093 check_privileged(s);
2094 update_psw_addr(s);
2095 gen_op_calc_cc(s);
2097 gen_helper_diag(cpu_env, r1, r3, func_code);
2099 tcg_temp_free_i32(func_code);
2100 tcg_temp_free_i32(r3);
2101 tcg_temp_free_i32(r1);
2102 return NO_EXIT;
2104 #endif
2106 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2108 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2109 return_low128(o->out);
2110 return NO_EXIT;
2113 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2115 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2116 return_low128(o->out);
2117 return NO_EXIT;
2120 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2122 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2123 return_low128(o->out);
2124 return NO_EXIT;
2127 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2129 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2130 return_low128(o->out);
2131 return NO_EXIT;
2134 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2136 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2137 return NO_EXIT;
2140 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2142 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2143 return NO_EXIT;
2146 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2148 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2149 return_low128(o->out2);
2150 return NO_EXIT;
2153 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2155 int r2 = get_field(s->fields, r2);
2156 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2157 return NO_EXIT;
2160 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2162 /* No cache information provided. */
2163 tcg_gen_movi_i64(o->out, -1);
2164 return NO_EXIT;
2167 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2169 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2170 return NO_EXIT;
2173 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2175 int r1 = get_field(s->fields, r1);
2176 int r2 = get_field(s->fields, r2);
2177 TCGv_i64 t = tcg_temp_new_i64();
2179 /* Note the "subsequently" in the PoO, which implies a defined result
2180 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2181 tcg_gen_shri_i64(t, psw_mask, 32);
2182 store_reg32_i64(r1, t);
2183 if (r2 != 0) {
2184 store_reg32_i64(r2, psw_mask);
2187 tcg_temp_free_i64(t);
2188 return NO_EXIT;
2191 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2193 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2194 tb->flags, (ab)use the tb->cs_base field as the address of
2195 the template in memory, and grab 8 bits of tb->flags/cflags for
2196 the contents of the register. We would then recognize all this
2197 in gen_intermediate_code_internal, generating code for exactly
2198 one instruction. This new TB then gets executed normally.
2200 On the other hand, this seems to be mostly used for modifying
2201 MVC inside of memcpy, which needs a helper call anyway. So
2202 perhaps this doesn't bear thinking about any further. */
2204 TCGv_i64 tmp;
2206 update_psw_addr(s);
2207 gen_op_calc_cc(s);
2209 tmp = tcg_const_i64(s->next_pc);
2210 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2211 tcg_temp_free_i64(tmp);
2213 return NO_EXIT;
2216 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2218 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2219 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2220 tcg_temp_free_i32(m3);
2221 return NO_EXIT;
2224 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2226 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2227 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2228 tcg_temp_free_i32(m3);
2229 return NO_EXIT;
2232 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2234 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2235 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2236 return_low128(o->out2);
2237 tcg_temp_free_i32(m3);
2238 return NO_EXIT;
2241 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2243 /* We'll use the original input for cc computation, since we get to
2244 compare that against 0, which ought to be better than comparing
2245 the real output against 64. It also lets cc_dst be a convenient
2246 temporary during our computation. */
2247 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2249 /* R1 = IN ? CLZ(IN) : 64. */
2250 gen_helper_clz(o->out, o->in2);
2252 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2253 value by 64, which is undefined. But since the shift is 64 iff the
2254 input is zero, we still get the correct result after and'ing. */
2255 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2256 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2257 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2258 return NO_EXIT;
2261 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2263 int m3 = get_field(s->fields, m3);
2264 int pos, len, base = s->insn->data;
2265 TCGv_i64 tmp = tcg_temp_new_i64();
2266 uint64_t ccm;
2268 switch (m3) {
2269 case 0xf:
2270 /* Effectively a 32-bit load. */
2271 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2272 len = 32;
2273 goto one_insert;
2275 case 0xc:
2276 case 0x6:
2277 case 0x3:
2278 /* Effectively a 16-bit load. */
2279 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2280 len = 16;
2281 goto one_insert;
2283 case 0x8:
2284 case 0x4:
2285 case 0x2:
2286 case 0x1:
2287 /* Effectively an 8-bit load. */
2288 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2289 len = 8;
2290 goto one_insert;
2292 one_insert:
2293 pos = base + ctz32(m3) * 8;
2294 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2295 ccm = ((1ull << len) - 1) << pos;
2296 break;
2298 default:
2299 /* This is going to be a sequence of loads and inserts. */
2300 pos = base + 32 - 8;
2301 ccm = 0;
2302 while (m3) {
2303 if (m3 & 0x8) {
2304 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2305 tcg_gen_addi_i64(o->in2, o->in2, 1);
2306 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2307 ccm |= 0xff << pos;
2309 m3 = (m3 << 1) & 0xf;
2310 pos -= 8;
2312 break;
2315 tcg_gen_movi_i64(tmp, ccm);
2316 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2317 tcg_temp_free_i64(tmp);
2318 return NO_EXIT;
2321 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2323 int shift = s->insn->data & 0xff;
2324 int size = s->insn->data >> 8;
2325 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2326 return NO_EXIT;
2329 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2331 TCGv_i64 t1;
2333 gen_op_calc_cc(s);
2334 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2336 t1 = tcg_temp_new_i64();
2337 tcg_gen_shli_i64(t1, psw_mask, 20);
2338 tcg_gen_shri_i64(t1, t1, 36);
2339 tcg_gen_or_i64(o->out, o->out, t1);
2341 tcg_gen_extu_i32_i64(t1, cc_op);
2342 tcg_gen_shli_i64(t1, t1, 28);
2343 tcg_gen_or_i64(o->out, o->out, t1);
2344 tcg_temp_free_i64(t1);
2345 return NO_EXIT;
2348 #ifndef CONFIG_USER_ONLY
2349 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2351 check_privileged(s);
2352 gen_helper_ipte(cpu_env, o->in1, o->in2);
2353 return NO_EXIT;
2356 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2358 check_privileged(s);
2359 gen_helper_iske(o->out, cpu_env, o->in2);
2360 return NO_EXIT;
2362 #endif
2364 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2366 gen_helper_ldeb(o->out, cpu_env, o->in2);
2367 return NO_EXIT;
2370 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2372 gen_helper_ledb(o->out, cpu_env, o->in2);
2373 return NO_EXIT;
2376 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2378 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2379 return NO_EXIT;
2382 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2384 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2385 return NO_EXIT;
2388 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2390 gen_helper_lxdb(o->out, cpu_env, o->in2);
2391 return_low128(o->out2);
2392 return NO_EXIT;
2395 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2397 gen_helper_lxeb(o->out, cpu_env, o->in2);
2398 return_low128(o->out2);
2399 return NO_EXIT;
2402 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2404 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2405 return NO_EXIT;
2408 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2410 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2411 return NO_EXIT;
2414 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2416 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2417 return NO_EXIT;
2420 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2422 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2423 return NO_EXIT;
2426 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2428 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2429 return NO_EXIT;
2432 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2434 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2435 return NO_EXIT;
2438 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2440 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2441 return NO_EXIT;
2444 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2446 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2447 return NO_EXIT;
2450 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2452 TCGLabel *lab = gen_new_label();
2453 store_reg32_i64(get_field(s->fields, r1), o->in2);
2454 /* The value is stored even in case of trap. */
2455 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2456 gen_trap(s);
2457 gen_set_label(lab);
2458 return NO_EXIT;
2461 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2463 TCGLabel *lab = gen_new_label();
2464 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2465 /* The value is stored even in case of trap. */
2466 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2467 gen_trap(s);
2468 gen_set_label(lab);
2469 return NO_EXIT;
2472 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2474 TCGLabel *lab = gen_new_label();
2475 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2476 /* The value is stored even in case of trap. */
2477 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2478 gen_trap(s);
2479 gen_set_label(lab);
2480 return NO_EXIT;
2483 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2485 TCGLabel *lab = gen_new_label();
2486 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2487 /* The value is stored even in case of trap. */
2488 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2489 gen_trap(s);
2490 gen_set_label(lab);
2491 return NO_EXIT;
2494 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2496 TCGLabel *lab = gen_new_label();
2497 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2498 /* The value is stored even in case of trap. */
2499 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2500 gen_trap(s);
2501 gen_set_label(lab);
2502 return NO_EXIT;
2505 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2507 DisasCompare c;
2509 disas_jcc(s, &c, get_field(s->fields, m3));
2511 if (c.is_64) {
2512 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2513 o->in2, o->in1);
2514 free_compare(&c);
2515 } else {
2516 TCGv_i32 t32 = tcg_temp_new_i32();
2517 TCGv_i64 t, z;
2519 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2520 free_compare(&c);
2522 t = tcg_temp_new_i64();
2523 tcg_gen_extu_i32_i64(t, t32);
2524 tcg_temp_free_i32(t32);
2526 z = tcg_const_i64(0);
2527 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2528 tcg_temp_free_i64(t);
2529 tcg_temp_free_i64(z);
2532 return NO_EXIT;
2535 #ifndef CONFIG_USER_ONLY
2536 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2538 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2539 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2540 check_privileged(s);
2541 potential_page_fault(s);
2542 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2543 tcg_temp_free_i32(r1);
2544 tcg_temp_free_i32(r3);
2545 return NO_EXIT;
2548 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2550 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2551 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2552 check_privileged(s);
2553 potential_page_fault(s);
2554 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2555 tcg_temp_free_i32(r1);
2556 tcg_temp_free_i32(r3);
2557 return NO_EXIT;
2559 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2561 check_privileged(s);
2562 potential_page_fault(s);
2563 gen_helper_lra(o->out, cpu_env, o->in2);
2564 set_cc_static(s);
2565 return NO_EXIT;
2568 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2570 TCGv_i64 t1, t2;
2572 check_privileged(s);
2573 per_breaking_event(s);
2575 t1 = tcg_temp_new_i64();
2576 t2 = tcg_temp_new_i64();
2577 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2578 tcg_gen_addi_i64(o->in2, o->in2, 4);
2579 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2580 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2581 tcg_gen_shli_i64(t1, t1, 32);
2582 gen_helper_load_psw(cpu_env, t1, t2);
2583 tcg_temp_free_i64(t1);
2584 tcg_temp_free_i64(t2);
2585 return EXIT_NORETURN;
2588 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2590 TCGv_i64 t1, t2;
2592 check_privileged(s);
2593 per_breaking_event(s);
2595 t1 = tcg_temp_new_i64();
2596 t2 = tcg_temp_new_i64();
2597 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2598 tcg_gen_addi_i64(o->in2, o->in2, 8);
2599 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2600 gen_helper_load_psw(cpu_env, t1, t2);
2601 tcg_temp_free_i64(t1);
2602 tcg_temp_free_i64(t2);
2603 return EXIT_NORETURN;
2605 #endif
2607 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2609 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2610 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2611 potential_page_fault(s);
2612 gen_helper_lam(cpu_env, r1, o->in2, r3);
2613 tcg_temp_free_i32(r1);
2614 tcg_temp_free_i32(r3);
2615 return NO_EXIT;
2618 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2620 int r1 = get_field(s->fields, r1);
2621 int r3 = get_field(s->fields, r3);
2622 TCGv_i64 t1, t2;
2624 /* Only one register to read. */
2625 t1 = tcg_temp_new_i64();
2626 if (unlikely(r1 == r3)) {
2627 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2628 store_reg32_i64(r1, t1);
2629 tcg_temp_free(t1);
2630 return NO_EXIT;
2633 /* First load the values of the first and last registers to trigger
2634 possible page faults. */
2635 t2 = tcg_temp_new_i64();
2636 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2637 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2638 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2639 store_reg32_i64(r1, t1);
2640 store_reg32_i64(r3, t2);
2642 /* Only two registers to read. */
2643 if (((r1 + 1) & 15) == r3) {
2644 tcg_temp_free(t2);
2645 tcg_temp_free(t1);
2646 return NO_EXIT;
2649 /* Then load the remaining registers. Page fault can't occur. */
2650 r3 = (r3 - 1) & 15;
2651 tcg_gen_movi_i64(t2, 4);
2652 while (r1 != r3) {
2653 r1 = (r1 + 1) & 15;
2654 tcg_gen_add_i64(o->in2, o->in2, t2);
2655 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2656 store_reg32_i64(r1, t1);
2658 tcg_temp_free(t2);
2659 tcg_temp_free(t1);
2661 return NO_EXIT;
2664 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2666 int r1 = get_field(s->fields, r1);
2667 int r3 = get_field(s->fields, r3);
2668 TCGv_i64 t1, t2;
2670 /* Only one register to read. */
2671 t1 = tcg_temp_new_i64();
2672 if (unlikely(r1 == r3)) {
2673 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2674 store_reg32h_i64(r1, t1);
2675 tcg_temp_free(t1);
2676 return NO_EXIT;
2679 /* First load the values of the first and last registers to trigger
2680 possible page faults. */
2681 t2 = tcg_temp_new_i64();
2682 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2683 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2684 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2685 store_reg32h_i64(r1, t1);
2686 store_reg32h_i64(r3, t2);
2688 /* Only two registers to read. */
2689 if (((r1 + 1) & 15) == r3) {
2690 tcg_temp_free(t2);
2691 tcg_temp_free(t1);
2692 return NO_EXIT;
2695 /* Then load the remaining registers. Page fault can't occur. */
2696 r3 = (r3 - 1) & 15;
2697 tcg_gen_movi_i64(t2, 4);
2698 while (r1 != r3) {
2699 r1 = (r1 + 1) & 15;
2700 tcg_gen_add_i64(o->in2, o->in2, t2);
2701 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2702 store_reg32h_i64(r1, t1);
2704 tcg_temp_free(t2);
2705 tcg_temp_free(t1);
2707 return NO_EXIT;
2710 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2712 int r1 = get_field(s->fields, r1);
2713 int r3 = get_field(s->fields, r3);
2714 TCGv_i64 t1, t2;
2716 /* Only one register to read. */
2717 if (unlikely(r1 == r3)) {
2718 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2719 return NO_EXIT;
2722 /* First load the values of the first and last registers to trigger
2723 possible page faults. */
2724 t1 = tcg_temp_new_i64();
2725 t2 = tcg_temp_new_i64();
2726 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2727 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2728 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2729 tcg_gen_mov_i64(regs[r1], t1);
2730 tcg_temp_free(t2);
2732 /* Only two registers to read. */
2733 if (((r1 + 1) & 15) == r3) {
2734 tcg_temp_free(t1);
2735 return NO_EXIT;
2738 /* Then load the remaining registers. Page fault can't occur. */
2739 r3 = (r3 - 1) & 15;
2740 tcg_gen_movi_i64(t1, 8);
2741 while (r1 != r3) {
2742 r1 = (r1 + 1) & 15;
2743 tcg_gen_add_i64(o->in2, o->in2, t1);
2744 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2746 tcg_temp_free(t1);
2748 return NO_EXIT;
2751 #ifndef CONFIG_USER_ONLY
2752 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2754 check_privileged(s);
2755 potential_page_fault(s);
2756 gen_helper_lura(o->out, cpu_env, o->in2);
2757 return NO_EXIT;
2760 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2762 check_privileged(s);
2763 potential_page_fault(s);
2764 gen_helper_lurag(o->out, cpu_env, o->in2);
2765 return NO_EXIT;
2767 #endif
2769 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2771 o->out = o->in2;
2772 o->g_out = o->g_in2;
2773 TCGV_UNUSED_I64(o->in2);
2774 o->g_in2 = false;
2775 return NO_EXIT;
2778 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2780 int b2 = get_field(s->fields, b2);
2781 TCGv ar1 = tcg_temp_new_i64();
2783 o->out = o->in2;
2784 o->g_out = o->g_in2;
2785 TCGV_UNUSED_I64(o->in2);
2786 o->g_in2 = false;
2788 switch (s->tb->flags & FLAG_MASK_ASC) {
2789 case PSW_ASC_PRIMARY >> 32:
2790 tcg_gen_movi_i64(ar1, 0);
2791 break;
2792 case PSW_ASC_ACCREG >> 32:
2793 tcg_gen_movi_i64(ar1, 1);
2794 break;
2795 case PSW_ASC_SECONDARY >> 32:
2796 if (b2) {
2797 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2798 } else {
2799 tcg_gen_movi_i64(ar1, 0);
2801 break;
2802 case PSW_ASC_HOME >> 32:
2803 tcg_gen_movi_i64(ar1, 2);
2804 break;
2807 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2808 tcg_temp_free_i64(ar1);
2810 return NO_EXIT;
2813 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2815 o->out = o->in1;
2816 o->out2 = o->in2;
2817 o->g_out = o->g_in1;
2818 o->g_out2 = o->g_in2;
2819 TCGV_UNUSED_I64(o->in1);
2820 TCGV_UNUSED_I64(o->in2);
2821 o->g_in1 = o->g_in2 = false;
2822 return NO_EXIT;
2825 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2827 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2828 potential_page_fault(s);
2829 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2830 tcg_temp_free_i32(l);
2831 return NO_EXIT;
2834 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2836 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2837 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2838 potential_page_fault(s);
2839 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2840 tcg_temp_free_i32(r1);
2841 tcg_temp_free_i32(r2);
2842 set_cc_static(s);
2843 return NO_EXIT;
2846 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2848 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2849 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2850 potential_page_fault(s);
2851 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2852 tcg_temp_free_i32(r1);
2853 tcg_temp_free_i32(r3);
2854 set_cc_static(s);
2855 return NO_EXIT;
2858 #ifndef CONFIG_USER_ONLY
2859 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2861 int r1 = get_field(s->fields, l1);
2862 check_privileged(s);
2863 potential_page_fault(s);
2864 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2865 set_cc_static(s);
2866 return NO_EXIT;
2869 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2871 int r1 = get_field(s->fields, l1);
2872 check_privileged(s);
2873 potential_page_fault(s);
2874 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2875 set_cc_static(s);
2876 return NO_EXIT;
2878 #endif
2880 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2882 potential_page_fault(s);
2883 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2884 set_cc_static(s);
2885 return NO_EXIT;
2888 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2890 potential_page_fault(s);
2891 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2892 set_cc_static(s);
2893 return_low128(o->in2);
2894 return NO_EXIT;
2897 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2899 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2900 return NO_EXIT;
2903 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2905 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2906 return NO_EXIT;
2909 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2911 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2912 return NO_EXIT;
2915 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2917 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2918 return NO_EXIT;
2921 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2923 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2924 return NO_EXIT;
2927 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2929 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2930 return_low128(o->out2);
2931 return NO_EXIT;
2934 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2936 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2937 return_low128(o->out2);
2938 return NO_EXIT;
2941 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2943 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2944 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2945 tcg_temp_free_i64(r3);
2946 return NO_EXIT;
2949 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2951 int r3 = get_field(s->fields, r3);
2952 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2953 return NO_EXIT;
2956 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2958 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2959 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2960 tcg_temp_free_i64(r3);
2961 return NO_EXIT;
2964 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2966 int r3 = get_field(s->fields, r3);
2967 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2968 return NO_EXIT;
2971 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2973 TCGv_i64 z, n;
2974 z = tcg_const_i64(0);
2975 n = tcg_temp_new_i64();
2976 tcg_gen_neg_i64(n, o->in2);
2977 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2978 tcg_temp_free_i64(n);
2979 tcg_temp_free_i64(z);
2980 return NO_EXIT;
2983 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2985 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2986 return NO_EXIT;
2989 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2991 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2992 return NO_EXIT;
2995 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2997 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2998 tcg_gen_mov_i64(o->out2, o->in2);
2999 return NO_EXIT;
3002 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3004 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3005 potential_page_fault(s);
3006 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3007 tcg_temp_free_i32(l);
3008 set_cc_static(s);
3009 return NO_EXIT;
3012 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3014 tcg_gen_neg_i64(o->out, o->in2);
3015 return NO_EXIT;
3018 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3020 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3021 return NO_EXIT;
3024 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3026 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3027 return NO_EXIT;
3030 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3032 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3033 tcg_gen_mov_i64(o->out2, o->in2);
3034 return NO_EXIT;
3037 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3039 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3040 potential_page_fault(s);
3041 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3042 tcg_temp_free_i32(l);
3043 set_cc_static(s);
3044 return NO_EXIT;
3047 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3049 tcg_gen_or_i64(o->out, o->in1, o->in2);
3050 return NO_EXIT;
3053 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3055 int shift = s->insn->data & 0xff;
3056 int size = s->insn->data >> 8;
3057 uint64_t mask = ((1ull << size) - 1) << shift;
3059 assert(!o->g_in2);
3060 tcg_gen_shli_i64(o->in2, o->in2, shift);
3061 tcg_gen_or_i64(o->out, o->in1, o->in2);
3063 /* Produce the CC from only the bits manipulated. */
3064 tcg_gen_andi_i64(cc_dst, o->out, mask);
3065 set_cc_nz_u64(s, cc_dst);
3066 return NO_EXIT;
3069 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3071 gen_helper_popcnt(o->out, o->in2);
3072 return NO_EXIT;
3075 #ifndef CONFIG_USER_ONLY
3076 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3078 check_privileged(s);
3079 gen_helper_ptlb(cpu_env);
3080 return NO_EXIT;
3082 #endif
3084 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3086 int i3 = get_field(s->fields, i3);
3087 int i4 = get_field(s->fields, i4);
3088 int i5 = get_field(s->fields, i5);
3089 int do_zero = i4 & 0x80;
3090 uint64_t mask, imask, pmask;
3091 int pos, len, rot;
3093 /* Adjust the arguments for the specific insn. */
3094 switch (s->fields->op2) {
3095 case 0x55: /* risbg */
3096 i3 &= 63;
3097 i4 &= 63;
3098 pmask = ~0;
3099 break;
3100 case 0x5d: /* risbhg */
3101 i3 &= 31;
3102 i4 &= 31;
3103 pmask = 0xffffffff00000000ull;
3104 break;
3105 case 0x51: /* risblg */
3106 i3 &= 31;
3107 i4 &= 31;
3108 pmask = 0x00000000ffffffffull;
3109 break;
3110 default:
3111 abort();
3114 /* MASK is the set of bits to be inserted from R2.
3115 Take care for I3/I4 wraparound. */
3116 mask = pmask >> i3;
3117 if (i3 <= i4) {
3118 mask ^= pmask >> i4 >> 1;
3119 } else {
3120 mask |= ~(pmask >> i4 >> 1);
3122 mask &= pmask;
3124 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3125 insns, we need to keep the other half of the register. */
3126 imask = ~mask | ~pmask;
3127 if (do_zero) {
3128 if (s->fields->op2 == 0x55) {
3129 imask = 0;
3130 } else {
3131 imask = ~pmask;
3135 /* In some cases we can implement this with deposit, which can be more
3136 efficient on some hosts. */
3137 if (~mask == imask && i3 <= i4) {
3138 if (s->fields->op2 == 0x5d) {
3139 i3 += 32, i4 += 32;
3141 /* Note that we rotate the bits to be inserted to the lsb, not to
3142 the position as described in the PoO. */
3143 len = i4 - i3 + 1;
3144 pos = 63 - i4;
3145 rot = (i5 - pos) & 63;
3146 } else {
3147 pos = len = -1;
3148 rot = i5 & 63;
3151 /* Rotate the input as necessary. */
3152 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3154 /* Insert the selected bits into the output. */
3155 if (pos >= 0) {
3156 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3157 } else if (imask == 0) {
3158 tcg_gen_andi_i64(o->out, o->in2, mask);
3159 } else {
3160 tcg_gen_andi_i64(o->in2, o->in2, mask);
3161 tcg_gen_andi_i64(o->out, o->out, imask);
3162 tcg_gen_or_i64(o->out, o->out, o->in2);
3164 return NO_EXIT;
3167 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3169 int i3 = get_field(s->fields, i3);
3170 int i4 = get_field(s->fields, i4);
3171 int i5 = get_field(s->fields, i5);
3172 uint64_t mask;
3174 /* If this is a test-only form, arrange to discard the result. */
3175 if (i3 & 0x80) {
3176 o->out = tcg_temp_new_i64();
3177 o->g_out = false;
3180 i3 &= 63;
3181 i4 &= 63;
3182 i5 &= 63;
3184 /* MASK is the set of bits to be operated on from R2.
3185 Take care for I3/I4 wraparound. */
3186 mask = ~0ull >> i3;
3187 if (i3 <= i4) {
3188 mask ^= ~0ull >> i4 >> 1;
3189 } else {
3190 mask |= ~(~0ull >> i4 >> 1);
3193 /* Rotate the input as necessary. */
3194 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3196 /* Operate. */
3197 switch (s->fields->op2) {
3198 case 0x55: /* AND */
3199 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3200 tcg_gen_and_i64(o->out, o->out, o->in2);
3201 break;
3202 case 0x56: /* OR */
3203 tcg_gen_andi_i64(o->in2, o->in2, mask);
3204 tcg_gen_or_i64(o->out, o->out, o->in2);
3205 break;
3206 case 0x57: /* XOR */
3207 tcg_gen_andi_i64(o->in2, o->in2, mask);
3208 tcg_gen_xor_i64(o->out, o->out, o->in2);
3209 break;
3210 default:
3211 abort();
3214 /* Set the CC. */
3215 tcg_gen_andi_i64(cc_dst, o->out, mask);
3216 set_cc_nz_u64(s, cc_dst);
3217 return NO_EXIT;
3220 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3222 tcg_gen_bswap16_i64(o->out, o->in2);
3223 return NO_EXIT;
3226 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3228 tcg_gen_bswap32_i64(o->out, o->in2);
3229 return NO_EXIT;
3232 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3234 tcg_gen_bswap64_i64(o->out, o->in2);
3235 return NO_EXIT;
3238 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3240 TCGv_i32 t1 = tcg_temp_new_i32();
3241 TCGv_i32 t2 = tcg_temp_new_i32();
3242 TCGv_i32 to = tcg_temp_new_i32();
3243 tcg_gen_extrl_i64_i32(t1, o->in1);
3244 tcg_gen_extrl_i64_i32(t2, o->in2);
3245 tcg_gen_rotl_i32(to, t1, t2);
3246 tcg_gen_extu_i32_i64(o->out, to);
3247 tcg_temp_free_i32(t1);
3248 tcg_temp_free_i32(t2);
3249 tcg_temp_free_i32(to);
3250 return NO_EXIT;
3253 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3255 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3256 return NO_EXIT;
3259 #ifndef CONFIG_USER_ONLY
3260 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3262 check_privileged(s);
3263 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3264 set_cc_static(s);
3265 return NO_EXIT;
3268 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3270 check_privileged(s);
3271 gen_helper_sacf(cpu_env, o->in2);
3272 /* Addressing mode has changed, so end the block. */
3273 return EXIT_PC_STALE;
3275 #endif
3277 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3279 int sam = s->insn->data;
3280 TCGv_i64 tsam;
3281 uint64_t mask;
3283 switch (sam) {
3284 case 0:
3285 mask = 0xffffff;
3286 break;
3287 case 1:
3288 mask = 0x7fffffff;
3289 break;
3290 default:
3291 mask = -1;
3292 break;
3295 /* Bizarre but true, we check the address of the current insn for the
3296 specification exception, not the next to be executed. Thus the PoO
3297 documents that Bad Things Happen two bytes before the end. */
3298 if (s->pc & ~mask) {
3299 gen_program_exception(s, PGM_SPECIFICATION);
3300 return EXIT_NORETURN;
3302 s->next_pc &= mask;
3304 tsam = tcg_const_i64(sam);
3305 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3306 tcg_temp_free_i64(tsam);
3308 /* Always exit the TB, since we (may have) changed execution mode. */
3309 return EXIT_PC_STALE;
3312 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3314 int r1 = get_field(s->fields, r1);
3315 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3316 return NO_EXIT;
3319 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3321 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3322 return NO_EXIT;
3325 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3327 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3328 return NO_EXIT;
3331 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3333 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3334 return_low128(o->out2);
3335 return NO_EXIT;
3338 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3340 gen_helper_sqeb(o->out, cpu_env, o->in2);
3341 return NO_EXIT;
3344 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3346 gen_helper_sqdb(o->out, cpu_env, o->in2);
3347 return NO_EXIT;
3350 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3352 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3353 return_low128(o->out2);
3354 return NO_EXIT;
3357 #ifndef CONFIG_USER_ONLY
3358 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3360 check_privileged(s);
3361 potential_page_fault(s);
3362 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3363 set_cc_static(s);
3364 return NO_EXIT;
3367 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3369 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3370 check_privileged(s);
3371 potential_page_fault(s);
3372 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3373 tcg_temp_free_i32(r1);
3374 return NO_EXIT;
3376 #endif
3378 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3380 DisasCompare c;
3381 TCGv_i64 a;
3382 TCGLabel *lab;
3383 int r1;
3385 disas_jcc(s, &c, get_field(s->fields, m3));
3387 /* We want to store when the condition is fulfilled, so branch
3388 out when it's not */
3389 c.cond = tcg_invert_cond(c.cond);
3391 lab = gen_new_label();
3392 if (c.is_64) {
3393 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3394 } else {
3395 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3397 free_compare(&c);
3399 r1 = get_field(s->fields, r1);
3400 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3401 if (s->insn->data) {
3402 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3403 } else {
3404 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3406 tcg_temp_free_i64(a);
3408 gen_set_label(lab);
3409 return NO_EXIT;
3412 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3414 uint64_t sign = 1ull << s->insn->data;
3415 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3416 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3417 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3418 /* The arithmetic left shift is curious in that it does not affect
3419 the sign bit. Copy that over from the source unchanged. */
3420 tcg_gen_andi_i64(o->out, o->out, ~sign);
3421 tcg_gen_andi_i64(o->in1, o->in1, sign);
3422 tcg_gen_or_i64(o->out, o->out, o->in1);
3423 return NO_EXIT;
3426 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3428 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3429 return NO_EXIT;
3432 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3434 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3435 return NO_EXIT;
3438 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3440 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3441 return NO_EXIT;
3444 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3446 gen_helper_sfpc(cpu_env, o->in2);
3447 return NO_EXIT;
3450 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3452 gen_helper_sfas(cpu_env, o->in2);
3453 return NO_EXIT;
3456 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3458 int b2 = get_field(s->fields, b2);
3459 int d2 = get_field(s->fields, d2);
3460 TCGv_i64 t1 = tcg_temp_new_i64();
3461 TCGv_i64 t2 = tcg_temp_new_i64();
3462 int mask, pos, len;
3464 switch (s->fields->op2) {
3465 case 0x99: /* SRNM */
3466 pos = 0, len = 2;
3467 break;
3468 case 0xb8: /* SRNMB */
3469 pos = 0, len = 3;
3470 break;
3471 case 0xb9: /* SRNMT */
3472 pos = 4, len = 3;
3473 break;
3474 default:
3475 tcg_abort();
3477 mask = (1 << len) - 1;
3479 /* Insert the value into the appropriate field of the FPC. */
3480 if (b2 == 0) {
3481 tcg_gen_movi_i64(t1, d2 & mask);
3482 } else {
3483 tcg_gen_addi_i64(t1, regs[b2], d2);
3484 tcg_gen_andi_i64(t1, t1, mask);
3486 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3487 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3488 tcg_temp_free_i64(t1);
3490 /* Then install the new FPC to set the rounding mode in fpu_status. */
3491 gen_helper_sfpc(cpu_env, t2);
3492 tcg_temp_free_i64(t2);
3493 return NO_EXIT;
3496 #ifndef CONFIG_USER_ONLY
3497 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3499 check_privileged(s);
3500 tcg_gen_shri_i64(o->in2, o->in2, 4);
3501 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3502 return NO_EXIT;
3505 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3507 check_privileged(s);
3508 gen_helper_sske(cpu_env, o->in1, o->in2);
3509 return NO_EXIT;
3512 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3514 check_privileged(s);
3515 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3516 return NO_EXIT;
3519 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3521 check_privileged(s);
3522 /* ??? Surely cpu address != cpu number. In any case the previous
3523 version of this stored more than the required half-word, so it
3524 is unlikely this has ever been tested. */
3525 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3526 return NO_EXIT;
3529 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3531 gen_helper_stck(o->out, cpu_env);
3532 /* ??? We don't implement clock states. */
3533 gen_op_movi_cc(s, 0);
3534 return NO_EXIT;
3537 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3539 TCGv_i64 c1 = tcg_temp_new_i64();
3540 TCGv_i64 c2 = tcg_temp_new_i64();
3541 gen_helper_stck(c1, cpu_env);
3542 /* Shift the 64-bit value into its place as a zero-extended
3543 104-bit value. Note that "bit positions 64-103 are always
3544 non-zero so that they compare differently to STCK"; we set
3545 the least significant bit to 1. */
3546 tcg_gen_shli_i64(c2, c1, 56);
3547 tcg_gen_shri_i64(c1, c1, 8);
3548 tcg_gen_ori_i64(c2, c2, 0x10000);
3549 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3550 tcg_gen_addi_i64(o->in2, o->in2, 8);
3551 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3552 tcg_temp_free_i64(c1);
3553 tcg_temp_free_i64(c2);
3554 /* ??? We don't implement clock states. */
3555 gen_op_movi_cc(s, 0);
3556 return NO_EXIT;
3559 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3561 check_privileged(s);
3562 gen_helper_sckc(cpu_env, o->in2);
3563 return NO_EXIT;
3566 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3568 check_privileged(s);
3569 gen_helper_stckc(o->out, cpu_env);
3570 return NO_EXIT;
3573 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3575 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3576 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3577 check_privileged(s);
3578 potential_page_fault(s);
3579 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3580 tcg_temp_free_i32(r1);
3581 tcg_temp_free_i32(r3);
3582 return NO_EXIT;
3585 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3587 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3588 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3589 check_privileged(s);
3590 potential_page_fault(s);
3591 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3592 tcg_temp_free_i32(r1);
3593 tcg_temp_free_i32(r3);
3594 return NO_EXIT;
3597 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3599 TCGv_i64 t1 = tcg_temp_new_i64();
3601 check_privileged(s);
3602 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3603 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3604 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3605 tcg_temp_free_i64(t1);
3607 return NO_EXIT;
3610 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3612 check_privileged(s);
3613 gen_helper_spt(cpu_env, o->in2);
3614 return NO_EXIT;
3617 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3619 TCGv_i64 f, a;
3620 /* We really ought to have more complete indication of facilities
3621 that we implement. Address this when STFLE is implemented. */
3622 check_privileged(s);
3623 f = tcg_const_i64(0xc0000000);
3624 a = tcg_const_i64(200);
3625 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3626 tcg_temp_free_i64(f);
3627 tcg_temp_free_i64(a);
3628 return NO_EXIT;
3631 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3633 check_privileged(s);
3634 gen_helper_stpt(o->out, cpu_env);
3635 return NO_EXIT;
3638 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3640 check_privileged(s);
3641 potential_page_fault(s);
3642 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3643 set_cc_static(s);
3644 return NO_EXIT;
3647 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3649 check_privileged(s);
3650 gen_helper_spx(cpu_env, o->in2);
3651 return NO_EXIT;
/* CANCEL SUBCHANNEL (XSCH).  For this and all of the subchannel ops
   below, the subchannel designation is passed implicitly in general
   register 1, hence regs[1] in the helper calls.  All are privileged
   and set the condition code from the helper result.  */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL (CSCH).  */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL (HSCH).  */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL (MSCH); o->in2 addresses the SCHIB operand.  */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH (RCHP).  */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL (RSCH).  */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* START SUBCHANNEL (SSCH); o->in2 addresses the ORB operand.  */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL (STSCH); o->in2 is the store destination.  */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL (TSCH); o->in2 is the IRB destination.  */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL (CHSC); o->in2 addresses the command block.  */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX (STPX): read the prefix from env->psa, masked so only
   the architecturally valid (8K-aligned, below 2G) bits are returned.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN AND/OR SYSTEM MASK.  Opcode 0xac is STNSM (AND the mask);
   any other opcode routed here is treated as STOSM (OR the mask).  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    /* The system mask is the top byte of the PSW mask.  */
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (STURA, 32-bit).  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (STURG, 64-bit).  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3791 #endif
/* Simple stores of in1 at the address in in2, by operand width.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3817 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3819 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3820 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3821 potential_page_fault(s);
3822 gen_helper_stam(cpu_env, r1, o->in2, r3);
3823 tcg_temp_free_i32(r1);
3824 tcg_temp_free_i32(r3);
3825 return NO_EXIT;
/* STORE CHARACTERS UNDER MASK (STCM/STCMH).  m3 selects which bytes of
   the 32-bit register field are stored to successive bytes of memory;
   insn->data gives the bit position of that field within the 64-bit
   register (0 for the low word, 32 for the high word).  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Shift amount that brings the lowest selected byte to bit 0.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  Walk the
           mask msb-first, storing one byte and bumping the address for
           each set bit.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3877 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3879 int r1 = get_field(s->fields, r1);
3880 int r3 = get_field(s->fields, r3);
3881 int size = s->insn->data;
3882 TCGv_i64 tsize = tcg_const_i64(size);
3884 while (1) {
3885 if (size == 8) {
3886 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3887 } else {
3888 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3890 if (r1 == r3) {
3891 break;
3893 tcg_gen_add_i64(o->in2, o->in2, tsize);
3894 r1 = (r1 + 1) & 15;
3897 tcg_temp_free_i64(tsize);
3898 return NO_EXIT;
3901 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3903 int r1 = get_field(s->fields, r1);
3904 int r3 = get_field(s->fields, r3);
3905 TCGv_i64 t = tcg_temp_new_i64();
3906 TCGv_i64 t4 = tcg_const_i64(4);
3907 TCGv_i64 t32 = tcg_const_i64(32);
3909 while (1) {
3910 tcg_gen_shl_i64(t, regs[r1], t32);
3911 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3912 if (r1 == r3) {
3913 break;
3915 tcg_gen_add_i64(o->in2, o->in2, t4);
3916 r1 = (r1 + 1) & 15;
3919 tcg_temp_free_i64(t);
3920 tcg_temp_free_i64(t4);
3921 tcg_temp_free_i64(t32);
3922 return NO_EXIT;
/* SEARCH STRING (SRST).  The helper returns the updated r1 value
   directly and the updated r2 value via the low-128 return slot.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* Plain subtract; the condition code is produced by the cout_* hooks.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: out = in1 - in2 - !carry.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison; widen the 0/1 result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL (SVC).  Record the SVC code and the instruction
   length for the interrupt handler, then raise the exception; this
   ends the translation block (EXIT_NORETURN).  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Make the PSW address and cc_op state visible before trapping.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
/* TEST DATA CLASS for 32-bit (TCEB), 64-bit (TCDB) and 128-bit (TCXB)
   floating-point operands; the helper sets the condition code.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    /* The 128-bit operand arrives in the out/out2 pair (see in1_x1_o).  */
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* TEST PROTECTION (TPROT); the helper sets the condition code.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
/* TRANSLATE (TR): table-translate l1+1 bytes in place.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED (TRE).  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST (TRT).  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK (UNPK).  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* EXCLUSIVE OR (XC): storage-to-storage XOR of l1+1 bytes.  When both
   operands designate the same location the result is all zeros, which
   is expanded inline as a memset for lengths up to 32 bytes.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l1 encodes length-1; emit the widest stores that fit.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XOR with itself always produces zero -> CC 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* Register XOR; CC handled by the cout_* hooks.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR IMMEDIATE: insn->data packs the field size in the high byte and
   the shift amount in the low byte.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a TCG global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Produce a constant-zero output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a constant-zero output pair (out2 aliases out, so mark it
   as a "global" to prevent a double free).  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* NZ is computed on the low 32 bits only; zero-extend first.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Fresh temporary pair, for 128-bit results.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 global; g_out prevents freeing it.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Even/odd general register pair (r1 must be even, see SPEC).  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* 128-bit FP register pair f1/f1+2 (r1 constrained by SPEC_r1_f128).  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store into the high 32 bits of r1.  */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* 32-bit results into the even/odd register pair.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd register pair:
   low half to r1+1, high half to r1.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Short (32-bit) FP result.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4423 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4425 int f1 = get_field(s->fields, r1);
4426 store_freg(f1, o->out);
4427 store_freg(f1 + 2, o->out2);
4429 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditional write-back: only store when r1 and r2 differ (when they
   are equal the operation was performed in place on the global).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Stores to the first-operand address, by width.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store to the second-operand address.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Store-and-load-back write generators for the interlocked ops; the
   loaded old value (in2) is copied into r1.  */
static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_32_r1_atomic 0

static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_64_r1_atomic 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.
   The "_o" variants alias a TCG global directly (g_in1 set so it is
   not freed); the plain variants produce a private copy or extension.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High word of r1, shifted down into the low word.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the r1 pair (r1 must be even, see SPEC).  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the even/odd pair: r1 high, r1+1 low.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Short (32-bit) FP first operand.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit FP first operand.  Note: this deliberately fills the OUT pair
   rather than in1/in2 -- the f128 consumers read the value from
   o->out/o->out2 (see e.g. op_tcxb above).  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Effective address of the first operand (base b1 + displacement d1).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Second-operand effective address placed in addr1 (x2 optional).  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory loads through the first-operand address, by width/signedness.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.
   As with in1, "_o" variants alias a TCG global (g_in2 set).  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit value assembled from the even/odd pair: r1 high, r1+1 low.  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when it is non-zero; otherwise in2 is left unset
   (presumably the consumer treats a missing operand specially --
    NOTE(review): confirm against the users of this generator).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* High word of r3, shifted down into the low word.  */
static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Short (32-bit) FP second operand.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit FP second operand: the f-register pair r2/r2+2 is placed in
   in1/in2 (cf. in1_x1_o, which uses out/out2 for the first operand).  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Address formed from the contents of r2 (no base/index/displacement).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Second-operand effective address (optional x2 index).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword offset from this insn.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift-amount operands, masked to the operation width.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Memory loads through the second-operand address; note the address in
   in2 is overwritten in place by the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* PC-relative memory loads.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Interlocked-update loads: address kept in addr1, value in a fresh
   temporary so the wout generator can store back to the same address.  */
static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_32s_atomic 0

static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_64_atomic 0
4953 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4955 o->in2 = tcg_const_i64(get_field(f, i2));
4957 #define SPEC_in2_i2 0
4959 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4961 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4963 #define SPEC_in2_i2_8u 0
4965 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4967 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4969 #define SPEC_in2_i2_16u 0
4971 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4973 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4975 #define SPEC_in2_i2_32u 0
4977 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4979 uint64_t i2 = (uint16_t)get_field(f, i2);
4980 o->in2 = tcg_const_i64(i2 << s->insn->data);
4982 #define SPEC_in2_i2_16u_shl 0
4984 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4986 uint64_t i2 = (uint32_t)get_field(f, i2);
4987 o->in2 = tcg_const_i64(i2 << s->insn->data);
4989 #define SPEC_in2_i2_32u_shl 0
4991 #ifndef CONFIG_USER_ONLY
4992 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
4994 o->in2 = tcg_const_i64(s->fields->raw_insn);
4996 #define SPEC_in2_insn 0
4997 #endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C() is D() with a zero data field; both expand per insn-data.def row. */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion: each table row becomes one enumerator, insn_<NM>. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: each row becomes a DisasInsn initializer, so that
   insn_info[insn_<NM>] (built below) describes insn <NM>.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: each row becomes a switch case mapping the full
   16-bit opcode to the corresponding insn_info entry. */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
5064 /* Extract a field from the insn. The INSN should be left-aligned in
5065 the uint64_t so that we can more easily utilize the big-bit-endian
5066 definitions we extract from the Principals of Operation. */
5068 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5070 uint32_t r, m;
5072 if (f->size == 0) {
5073 return;
5076 /* Zero extract the field from the insn. */
5077 r = (insn << f->beg) >> (64 - f->size);
5079 /* Sign-extend, or un-swap the field as necessary. */
5080 switch (f->type) {
5081 case 0: /* unsigned */
5082 break;
5083 case 1: /* signed */
5084 assert(f->size <= 32);
5085 m = 1u << (f->size - 1);
5086 r = (r ^ m) - m;
5087 break;
5088 case 2: /* dl+dh split, signed 20 bit. */
5089 r = ((int8_t)r << 12) | (r >> 8);
5090 break;
5091 default:
5092 abort();
5095 /* Validate that the "compressed" encoding we selected above is valid.
5096 I.e. we havn't make two different original fields overlap. */
5097 assert(((o->presentC >> f->indexC) & 1) == 0);
5098 o->presentC |= 1 << f->indexC;
5099 o->presentO |= 1 << f->indexO;
5101 o->c[f->indexC] = r;
5104 /* Lookup the insn at the current PC, extracting the operands into O and
5105 returning the info struct for the insn. Returns NULL for invalid insn. */
5107 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5108 DisasFields *f)
5110 uint64_t insn, pc = s->pc;
5111 int op, op2, ilen;
5112 const DisasInsn *info;
5114 insn = ld_code2(env, pc);
5115 op = (insn >> 8) & 0xff;
5116 ilen = get_ilen(op);
5117 s->next_pc = s->pc + ilen;
5119 switch (ilen) {
5120 case 2:
5121 insn = insn << 48;
5122 break;
5123 case 4:
5124 insn = ld_code4(env, pc) << 32;
5125 break;
5126 case 6:
5127 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5128 break;
5129 default:
5130 abort();
5133 /* We can't actually determine the insn format until we've looked up
5134 the full insn opcode. Which we can't do without locating the
5135 secondary opcode. Assume by default that OP2 is at bit 40; for
5136 those smaller insns that don't actually have a secondary opcode
5137 this will correctly result in OP2 = 0. */
5138 switch (op) {
5139 case 0x01: /* E */
5140 case 0x80: /* S */
5141 case 0x82: /* S */
5142 case 0x93: /* S */
5143 case 0xb2: /* S, RRF, RRE */
5144 case 0xb3: /* RRE, RRD, RRF */
5145 case 0xb9: /* RRE, RRF */
5146 case 0xe5: /* SSE, SIL */
5147 op2 = (insn << 8) >> 56;
5148 break;
5149 case 0xa5: /* RI */
5150 case 0xa7: /* RI */
5151 case 0xc0: /* RIL */
5152 case 0xc2: /* RIL */
5153 case 0xc4: /* RIL */
5154 case 0xc6: /* RIL */
5155 case 0xc8: /* SSF */
5156 case 0xcc: /* RIL */
5157 op2 = (insn << 12) >> 60;
5158 break;
5159 case 0xd0 ... 0xdf: /* SS */
5160 case 0xe1: /* SS */
5161 case 0xe2: /* SS */
5162 case 0xe8: /* SS */
5163 case 0xe9: /* SS */
5164 case 0xea: /* SS */
5165 case 0xee ... 0xf3: /* SS */
5166 case 0xf8 ... 0xfd: /* SS */
5167 op2 = 0;
5168 break;
5169 default:
5170 op2 = (insn << 40) >> 56;
5171 break;
5174 memset(f, 0, sizeof(*f));
5175 f->raw_insn = insn;
5176 f->op = op;
5177 f->op2 = op2;
5179 /* Lookup the instruction. */
5180 info = lookup_opc(op << 8 | op2);
5182 /* If we found it, extract the operands. */
5183 if (info != NULL) {
5184 DisasFormat fmt = info->fmt;
5185 int i;
5187 for (i = 0; i < NUM_C_FIELD; ++i) {
5188 extract_field(f, &format_info[fmt].op[i], insn);
5191 return info;
5194 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5196 const DisasInsn *insn;
5197 ExitStatus ret = NO_EXIT;
5198 DisasFields f;
5199 DisasOps o;
5201 /* Search for the insn in the table. */
5202 insn = extract_insn(env, s, &f);
5204 /* Not found means unimplemented/illegal opcode. */
5205 if (insn == NULL) {
5206 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5207 f.op, f.op2);
5208 gen_illegal_opcode(s);
5209 return EXIT_NORETURN;
5212 #ifndef CONFIG_USER_ONLY
5213 if (s->tb->flags & FLAG_MASK_PER) {
5214 TCGv_i64 addr = tcg_const_i64(s->pc);
5215 gen_helper_per_ifetch(cpu_env, addr);
5216 tcg_temp_free_i64(addr);
5218 #endif
5220 /* Check for insn specification exceptions. */
5221 if (insn->spec) {
5222 int spec = insn->spec, excp = 0, r;
5224 if (spec & SPEC_r1_even) {
5225 r = get_field(&f, r1);
5226 if (r & 1) {
5227 excp = PGM_SPECIFICATION;
5230 if (spec & SPEC_r2_even) {
5231 r = get_field(&f, r2);
5232 if (r & 1) {
5233 excp = PGM_SPECIFICATION;
5236 if (spec & SPEC_r3_even) {
5237 r = get_field(&f, r3);
5238 if (r & 1) {
5239 excp = PGM_SPECIFICATION;
5242 if (spec & SPEC_r1_f128) {
5243 r = get_field(&f, r1);
5244 if (r > 13) {
5245 excp = PGM_SPECIFICATION;
5248 if (spec & SPEC_r2_f128) {
5249 r = get_field(&f, r2);
5250 if (r > 13) {
5251 excp = PGM_SPECIFICATION;
5254 if (excp) {
5255 gen_program_exception(s, excp);
5256 return EXIT_NORETURN;
5260 /* Set up the strutures we use to communicate with the helpers. */
5261 s->insn = insn;
5262 s->fields = &f;
5263 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5264 TCGV_UNUSED_I64(o.out);
5265 TCGV_UNUSED_I64(o.out2);
5266 TCGV_UNUSED_I64(o.in1);
5267 TCGV_UNUSED_I64(o.in2);
5268 TCGV_UNUSED_I64(o.addr1);
5270 /* Implement the instruction. */
5271 if (insn->help_in1) {
5272 insn->help_in1(s, &f, &o);
5274 if (insn->help_in2) {
5275 insn->help_in2(s, &f, &o);
5277 if (insn->help_prep) {
5278 insn->help_prep(s, &f, &o);
5280 if (insn->help_op) {
5281 ret = insn->help_op(s, &o);
5283 if (insn->help_wout) {
5284 insn->help_wout(s, &f, &o);
5286 if (insn->help_cout) {
5287 insn->help_cout(s, &o);
5290 /* Free any temporaries created by the helpers. */
5291 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5292 tcg_temp_free_i64(o.out);
5294 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5295 tcg_temp_free_i64(o.out2);
5297 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5298 tcg_temp_free_i64(o.in1);
5300 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5301 tcg_temp_free_i64(o.in2);
5303 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5304 tcg_temp_free_i64(o.addr1);
5307 #ifndef CONFIG_USER_ONLY
5308 if (s->tb->flags & FLAG_MASK_PER) {
5309 /* An exception might be triggered, save PSW if not already done. */
5310 if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5311 tcg_gen_movi_i64(psw_addr, s->next_pc);
5314 /* Save off cc. */
5315 update_cc_op(s);
5317 /* Call the helper to check for a possible PER exception. */
5318 gen_helper_per_check_exception(cpu_env);
5320 #endif
5322 /* Advance to the next instruction. */
5323 s->pc = s->next_pc;
5324 return ret;
5327 void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
5329 S390CPU *cpu = s390_env_get_cpu(env);
5330 CPUState *cs = CPU(cpu);
5331 DisasContext dc;
5332 target_ulong pc_start;
5333 uint64_t next_page_start;
5334 int num_insns, max_insns;
5335 ExitStatus status;
5336 bool do_debug;
5338 pc_start = tb->pc;
5340 /* 31-bit mode */
5341 if (!(tb->flags & FLAG_MASK_64)) {
5342 pc_start &= 0x7fffffff;
5345 dc.tb = tb;
5346 dc.pc = pc_start;
5347 dc.cc_op = CC_OP_DYNAMIC;
5348 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5350 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5352 num_insns = 0;
5353 max_insns = tb->cflags & CF_COUNT_MASK;
5354 if (max_insns == 0) {
5355 max_insns = CF_COUNT_MASK;
5357 if (max_insns > TCG_MAX_INSNS) {
5358 max_insns = TCG_MAX_INSNS;
5361 gen_tb_start(tb);
5363 do {
5364 tcg_gen_insn_start(dc.pc, dc.cc_op);
5365 num_insns++;
5367 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5368 status = EXIT_PC_STALE;
5369 do_debug = true;
5370 /* The address covered by the breakpoint must be included in
5371 [tb->pc, tb->pc + tb->size) in order to for it to be
5372 properly cleared -- thus we increment the PC here so that
5373 the logic setting tb->size below does the right thing. */
5374 dc.pc += 2;
5375 break;
5378 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5379 gen_io_start();
5382 status = NO_EXIT;
5383 if (status == NO_EXIT) {
5384 status = translate_one(env, &dc);
5387 /* If we reach a page boundary, are single stepping,
5388 or exhaust instruction count, stop generation. */
5389 if (status == NO_EXIT
5390 && (dc.pc >= next_page_start
5391 || tcg_op_buf_full()
5392 || num_insns >= max_insns
5393 || singlestep
5394 || cs->singlestep_enabled)) {
5395 status = EXIT_PC_STALE;
5397 } while (status == NO_EXIT);
5399 if (tb->cflags & CF_LAST_IO) {
5400 gen_io_end();
5403 switch (status) {
5404 case EXIT_GOTO_TB:
5405 case EXIT_NORETURN:
5406 break;
5407 case EXIT_PC_STALE:
5408 update_psw_addr(&dc);
5409 /* FALLTHRU */
5410 case EXIT_PC_UPDATED:
5411 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5412 cc op type is in env */
5413 update_cc_op(&dc);
5414 /* Exit the TB, either by raising a debug exception or by return. */
5415 if (do_debug) {
5416 gen_exception(EXCP_DEBUG);
5417 } else {
5418 tcg_gen_exit_tb(0);
5420 break;
5421 default:
5422 abort();
5425 gen_tb_end(tb, num_insns);
5427 tb->size = dc.pc - pc_start;
5428 tb->icount = num_insns;
5430 #if defined(S390X_DEBUG_DISAS)
5431 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5432 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5433 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5434 qemu_log("\n");
5436 #endif
5439 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5440 target_ulong *data)
5442 int cc_op = data[1];
5443 env->psw.addr = data[0];
5444 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5445 env->cc_op = cc_op;